diff --git a/.github/ISSUE_TEMPLATE/bug.yaml b/.github/ISSUE_TEMPLATE/bug.yaml new file mode 100644 index 00000000000..bfad0912cab --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yaml @@ -0,0 +1,52 @@ +name: 'Bug report' +description: 'File a bug report' +labels: ['bug'] +body: +- type: 'markdown' + attributes: + value: |- + Thank you for filing a bug. Please complete the form below so we can triage, reproduce, and fix your issue. + +- id: 'tldr' + type: 'textarea' + attributes: + label: 'TL;DR' + description: 'Describe the bug in 1-2 sentences.' + validations: + required: true + +- id: 'expected_behavior' + type: 'textarea' + attributes: + label: 'Expected behavior' + description: 'What did you expect to happen?' + +- id: 'observed_behavior' + type: 'textarea' + attributes: + label: 'Observed behavior' + description: 'What happened instead?' + +- id: 'tf_config' + type: 'textarea' + attributes: + label: 'Terraform Configuration' + description: 'Paste your reproducible Terraform configuration, removing any sensitive values.' + render: 'hcl' + validations: + required: true + +- id: 'tf_version' + type: 'textarea' + attributes: + label: 'Terraform Version' + description: 'Paste the output of `terraform version`, removing any sensitive values.' + render: 'sh' + validations: + required: true + +- id: 'additional' + type: 'textarea' + attributes: + label: 'Additional information' + description: 'Is there anything else you think we should know?' diff --git a/.github/ISSUE_TEMPLATE/feature.yaml b/.github/ISSUE_TEMPLATE/feature.yaml new file mode 100644 index 00000000000..3f4561ab2ff --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.yaml @@ -0,0 +1,36 @@ +name: 'Feature request' +description: 'File a feature request' +labels: ['enhancement'] +body: +- type: 'markdown' + attributes: + value: |- + Thank you for requesting a feature. Please complete the form below so we can triage and prioritize your feature. 
+ +- id: 'component' + type: 'textarea' + attributes: + label: 'Component' + description: 'Which component is this FR for? Examples: CLI, Devtools, Linter, Test framework etc' + render: 'markdown' + +- id: 'tldr' + type: 'textarea' + attributes: + label: 'TL;DR' + description: 'Describe the feature in 1-2 sentences.' + validations: + required: true + +- id: 'detailed_design' + type: 'textarea' + attributes: + label: 'Detailed design' + description: 'Do you have more information about a detailed design? Are there specific considerations to take? Include sample configuration if possible.' + render: 'markdown' + +- id: 'additional' + type: 'textarea' + attributes: + label: 'Additional information' + description: 'Is there anything else you think we should know?' diff --git a/.github/ISSUE_TEMPLATE/issue-template.md b/.github/ISSUE_TEMPLATE/issue-template.md deleted file mode 100644 index cc20b140c19..00000000000 --- a/.github/ISSUE_TEMPLATE/issue-template.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -name: Issue template -about: Describe this issue template's purpose here. -title: '' -labels: '' -assignees: '' - ---- - -############################ NOTE ####################### - -Dear CFT User! - -If you are looking to build new GCP infrastructure, we recommend that you use [Terraform CFT modules](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/docs/terraform.md) -Terraform CFT supports the most recent GCP resources, reflects GCP best practices can be used off-the-shelf to quickly build a repeatable enterprise-ready foundation. -Additionally, if you are a looking to manage your GCP resources through Kubernetes, consider using [Config Connector CFT solutions](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/tree/master/config-connector/solutions). 
- - -###################### REMOVE NOTE ABOVE ################# diff --git a/.github/conventional-commit-lint.yaml b/.github/conventional-commit-lint.yaml new file mode 100644 index 00000000000..c967ffa6fe6 --- /dev/null +++ b/.github/conventional-commit-lint.yaml @@ -0,0 +1,2 @@ +enabled: true +always_check_pr_title: true diff --git a/.github/issue_template.md b/.github/issue_template.md deleted file mode 100644 index db2b29393fb..00000000000 --- a/.github/issue_template.md +++ /dev/null @@ -1,5 +0,0 @@ -Dear CFT User! - -If you are looking to build new GCP infrastructure, we recommend that you use [Terraform CFT modules](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/docs/terraform.md) -Terraform CFT supports the most recent GCP resources, reflects GCP best practices can be used off-the-shelf to quickly build a repeatable enterprise-ready foundation. -Additionally, if you are a looking to manage your GCP resources through Kubernetes, consider using [Config Connector CFT solutions](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/tree/master/config-connector/solutions). diff --git a/.github/release-please.yml b/.github/release-please.yml new file mode 100644 index 00000000000..396921f1683 --- /dev/null +++ b/.github/release-please.yml @@ -0,0 +1,16 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +handleGHRelease: true +manifest: true diff --git a/.github/renovate.json b/.github/renovate.json new file mode 100644 index 00000000000..83b115a48ed --- /dev/null +++ b/.github/renovate.json @@ -0,0 +1,86 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "mergeConfidence:all-badges", + "config:recommended", + ":semanticCommits", + ":preserveSemverRanges", + "helpers:pinGitHubActionDigests" + ], + "minimumReleaseAge": "7 days", + "ignorePaths": ["tflint-ruleset-blueprint/rules/testdata/**"], + "labels": ["dependencies"], + "vulnerabilityAlerts": { + "labels": ["type:security"], + "minimumReleaseAge": null + }, + "separateMajorMinor": false, + "additionalBranchPrefix": "{{parentDir}}-", + "constraints": { + "go": "1.23" + }, + "packageRules": [ + { + "matchFileNames": [".github/**"], + "extends": [":semanticCommitTypeAll(chore)", ":rebaseStalePrs"] + }, + { + "matchManagers": ["terraform"], + "matchDepNames": ["google", "google-beta"], + "groupName": "terraform google provider", + "rangeStrategy": "widen" + }, + { + "matchManagers": ["terraform"], + "matchDepTypes": ["module"], + "enabled": false + }, + { + "matchDepTypes": ["require"], + "postUpdateOptions": ["gomodTidy", "gomodUpdateImportPaths"], + "groupName": "GO modules" + }, + { + "matchPackageNames": ["go"], + "allowedVersions": "1.23", + "postUpdateOptions": ["gomodTidy", "gomodUpdateImportPaths"] + } + ], + "customManagers": [ + { + "customType": "regex", + "fileMatch": ["(^|/)Makefile$"], + "matchStrings": ["ALPINE_VERSION := (?.*?)\\n"], + "datasourceTemplate": "docker", + "depNameTemplate": "alpine" + }, + { + "customType": "regex", + "fileMatch": ["(^|/)Makefile$"], + "matchStrings": [ + "DOCKER_TAG_VERSION_DEVELOPER_TOOLS := (?.*?)\\n" + ], + "datasourceTemplate": "docker", + "registryUrlTemplate": "https://gcr.io/cloud-foundation-cicd", + "depNameTemplate": "cft/developer-tools" + }, + { + "customType": "regex", + "fileMatch": 
["(^|/)build/(int|lint)\\.cloudbuild\\.yaml$"], + "matchStrings": [ + " _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '(?.*?)'\\n" + ], + "datasourceTemplate": "docker", + "registryUrlTemplate": "https://gcr.io/cloud-foundation-cicd", + "depNameTemplate": "cft/developer-tools" + }, + { + "customType": "regex", + "fileMatch": ["(^|/)Makefile$"], + "matchStrings": ["GOLANGCI_VERSION := (?.*?)\\n"], + "datasourceTemplate": "docker", + "depNameTemplate": "golangci/golangci-lint", + "extractVersionTemplate": "^v?(?.*)$" + } + ] +} diff --git a/.github/trusted-contribution.yml b/.github/trusted-contribution.yml new file mode 100644 index 00000000000..2889bbc6dc6 --- /dev/null +++ b/.github/trusted-contribution.yml @@ -0,0 +1,24 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +annotations: + - type: comment + text: "/gcbrun" +trustedContributors: + - release-please[bot] + - renovate[bot] + - forking-renovate[bot] + - renovate-bot + - dependabot[bot] + - cloud-foundation-bot diff --git a/.github/workflows/build-push-cft-devtools.yml b/.github/workflows/build-push-cft-devtools.yml index fdf35b9d805..e14fffa679e 100644 --- a/.github/workflows/build-push-cft-devtools.yml +++ b/.github/workflows/build-push-cft-devtools.yml @@ -13,14 +13,26 @@ jobs: build-push-dev-tools: name: Build and push new CFT dev tools image runs-on: ubuntu-latest + if: github.repository == 'GoogleCloudPlatform/cloud-foundation-toolkit' + + permissions: + contents: 'read' + id-token: 'write' + issues: 'write' steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - id: 'auth' + name: 'Authenticate to Google Cloud' + uses: 'google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f' # v2.1.7 + with: + workload_identity_provider: '${{ secrets.GCP_WIF_PROVIDER }}' + service_account: '${{ secrets.GCP_WIF_SA_EMAIL }}' - - uses: google-github-actions/setup-gcloud@master + - name: Setup gcloud + uses: google-github-actions/setup-gcloud@6189d56e4096ee891640bb02ac264be376592d6a # v2.1.2 with: - version: "286.0.0" - service_account_key: ${{ secrets.GCP_SA_KEY }} project_id: ${{ env.PROJECT_ID }} - name: Build @@ -28,6 +40,30 @@ jobs: gcloud auth configure-docker -q cd infra/build && make build-image-developer-tools - - name: Push + - name: Filter paths for push + uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 + id: filter + with: + filters: | + src: + - "infra/build/**" + + - if: steps.filter.outputs.src == 'true' + name: Push + env: + GITHUB_SHA: ${{ github.sha }} run: |- cd infra/build && make release-image-developer-tools + + - name: Open issue if push failed + if: ${{ failure() && steps.filter.outputs.src == 'true' }} + uses: 
actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: |- + github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: 'build-push-dev-tools job failed', + body: 'Logs: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}', + assignees: ['bharathkkb','apeabody'] + }) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 00000000000..c73e0bdf592 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,92 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "master" ] + pull_request: + branches: [ "master" ] + paths: + - '**/*.go' + - '**/*.js' + - '**/*.py' + - '**/*.rb' + schedule: + - cron: '32 11 * * 0' + +jobs: + analyze: + name: Analyze + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners + # Consider using larger runners for possible analysis time improvements. 
+ runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'go', 'javascript', 'python', 'ruby' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby', 'swift' ] + # Use only 'java' to analyze code written in Java, Kotlin or both + # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install Go + if: matrix.language == 'go' + uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + with: + go-version-file: 'go.work' + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + + # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). 
+ # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + + # â„šī¸ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. + + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/go-fbf-test.yml b/.github/workflows/go-fbf-test.yml new file mode 100644 index 00000000000..78dd80da666 --- /dev/null +++ b/.github/workflows/go-fbf-test.yml @@ -0,0 +1,34 @@ +name: Flaky Build Finder Tests + +on: + pull_request: + branches: + - 'master' + paths: + - 'infra/utils/fbf/**' + - '.github/workflows/go-fbf-test.yml' + +concurrency: + group: '${{github.workflow}}-${{ github.head_ref || github.ref }}' + cancel-in-progress: true + +jobs: + unit: + name: ${{ matrix.operating-system }} unit tests + runs-on: ${{ matrix.operating-system }} + defaults: + run: + shell: bash + working-directory: 'infra/utils/fbf' + strategy: + fail-fast: false + matrix: + operating-system: [ubuntu-latest, macos-latest] + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + with: + go-version-file: infra/utils/fbf/go.mod + cache-dependency-path: infra/utils/fbf/go.sum + - run: |- + go test ./... 
-v diff --git a/.github/workflows/go-lint.yaml b/.github/workflows/go-lint.yaml new file mode 100644 index 00000000000..194cca3bf06 --- /dev/null +++ b/.github/workflows/go-lint.yaml @@ -0,0 +1,45 @@ +name: lint +on: + pull_request: + branches: + - master + paths: + - ".github/workflows/go-lint.yaml" + - "cli/**" + - "infra/blueprint-test/**" + - "tflint-ruleset-blueprint/**" +permissions: + contents: read + +concurrency: + group: '$${{ github.workflow }}-$${{ github.head_ref || github.ref }}' + cancel-in-progress: true + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + folder: [cli, infra/blueprint-test, tflint-ruleset-blueprint] + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + with: + go-version-file: ${{ matrix.folder }}/go.mod + cache-dependency-path: ${{ matrix.folder }}/go.sum + - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 + id: changes + with: + filters: | + src: + - ${{ matrix.folder }}/** + - ".github/workflows/go-lint.yaml" + - if: steps.changes.outputs.src == 'true' + name: golangci-lint + uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 + with: + version: latest + working-directory: ${{ matrix.folder }} + args: --timeout=5m diff --git a/.github/workflows/go-module-swapper.yml b/.github/workflows/go-module-swapper.yml new file mode 100644 index 00000000000..ab635c56ba2 --- /dev/null +++ b/.github/workflows/go-module-swapper.yml @@ -0,0 +1,51 @@ +name: Module Swapper Tests + +on: + pull_request: + branches: + - 'master' + paths: + - 'infra/module-swapper/**' + - '.github/workflows/go-module-swapper.yml' + +concurrency: + group: '${{github.workflow}}-${{ github.head_ref || github.ref }}' + cancel-in-progress: true + +jobs: + unit: + name: ${{ matrix.operating-system }} unit tests + runs-on: ${{ matrix.operating-system }} + 
defaults: + run: + shell: bash + working-directory: 'infra/module-swapper' + strategy: + fail-fast: false + matrix: + operating-system: [ubuntu-latest, macos-latest] + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + with: + go-version-file: infra/module-swapper/go.mod + cache-dependency-path: infra/module-swapper/go.sum + - run: |- + go test ./... -v + golangci: + name: lint + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + with: + go-version-file: infra/module-swapper/go.mod + cache-dependency-path: infra/module-swapper/go.sum + - name: golangci-lint + uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 + with: + version: latest + working-directory: 'infra/module-swapper' + args: --timeout=5m diff --git a/.github/workflows/go-tflint-plugin.yml b/.github/workflows/go-tflint-plugin.yml new file mode 100644 index 00000000000..ec27edbcd75 --- /dev/null +++ b/.github/workflows/go-tflint-plugin.yml @@ -0,0 +1,60 @@ +name: TFLint blueprint ruleset + +on: + push: + branches: + - 'master' + paths: + - 'tflint-ruleset-blueprint/**' + - '.github/workflows/go-tflint-plugin.yml' + pull_request: + branches: + - 'master' + paths: + - 'tflint-ruleset-blueprint/**' + - '.github/workflows/go-tflint-plugin.yml' + +concurrency: + group: '${{github.workflow}}-${{ github.head_ref || github.ref }}' + cancel-in-progress: true + +jobs: + unit: + name: ${{ matrix.operating-system }} unit tests + runs-on: ${{ matrix.operating-system }} + defaults: + run: + shell: bash + working-directory: 'tflint-ruleset-blueprint' + strategy: + fail-fast: false + matrix: + operating-system: [ubuntu-latest] + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + 
- uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + with: + go-version-file: tflint-ruleset-blueprint/go.mod + cache-dependency-path: tflint-ruleset-blueprint/go.sum + - run: |- + make test + releaser: + runs-on: ubuntu-latest + defaults: + run: + working-directory: 'tflint-ruleset-blueprint' + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Set up Go + uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + with: + go-version-file: 'tflint-ruleset-blueprint/go.mod' + - run: echo "GORELEASER_CURRENT_TAG=v0.0.0" >> $GITHUB_ENV # sample tag for testing goreleaser + - run: echo "${{env.GORELEASER_CURRENT_TAG}}" + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@9ed2f89a662bf1735a48bc8557fd212fa902bebf # v6.1.0 + with: + version: latest + args: release --clean --skip=validate,publish + workdir: tflint-ruleset-blueprint diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 00000000000..4276f0b0d7f --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,46 @@ +# Copyright 2023-2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: 'lint' + +on: + pull_request: + branches: + - 'master' + paths: + - "infra/terraform/**" + - ".github/workflows/lint.yaml" + +concurrency: + group: '${{ github.workflow }}-${{ github.head_ref || github.ref }}' + cancel-in-progress: true + +jobs: + lint: + name: 'lint-infra-terraform' + runs-on: 'ubuntu-latest' + steps: + - uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # v4.2.2 + - name: Cache lint-infra-terraform + id: cache-lint-infra-terraform + uses: 'actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57' # v4.2.0 + with: + path: | + ${{ github.workspace }}/test/integration/tmp/.terraform + key: cache-lint-infra-terraform-${{ github.run_id }} + restore-keys: cache-lint-infra-terraform + - run: docker run --rm -e EXCLUDE_LINT_DIRS -e EXCLUDE_HEADER_CHECK -v ${{ github.workspace }}:/workspace gcr.io/cloud-foundation-cicd/cft/developer-tools:1 /usr/local/bin/test_lint.sh + env: + EXCLUDE_LINT_DIRS: '\./cli|\./tflint-ruleset-blueprint|\./infra/build|\./infra/utils|\./infra/blueprint-test|\./infra/concourse|\./infra/modules|\./reports|\./.github|\./docs|\./infra/module-swapper' + EXCLUDE_HEADER_CHECK: '.*' diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml new file mode 100644 index 00000000000..8ec1e8d550f --- /dev/null +++ b/.github/workflows/pre-commit.yml @@ -0,0 +1,14 @@ +name: pre-commit + +on: + pull_request: + push: + branches: [main] + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 + - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1 diff --git a/.github/workflows/release-cli.yml b/.github/workflows/release-cli.yml index ed51368de3e..e044bd20af0 100644 --- a/.github/workflows/release-cli.yml +++ b/.github/workflows/release-cli.yml @@ -1,26 +1,46 @@ name: Release new CLI versions on: push: - branches: + branches: 
- "master" paths: - "cli/Makefile" + - ".github/workflows/release-cli.yml" env: CLI_BUCKET: gs://cft-cli RELEASE_URL: https://api.github.com/repos/GoogleCloudPlatform/cloud-foundation-toolkit/releases/latest jobs: release-new-cli: - name: Build and push new CFT dev tools image + name: Release new CLI versions runs-on: ubuntu-latest + if: github.repository == 'GoogleCloudPlatform/cloud-foundation-toolkit' + + permissions: + contents: 'write' + id-token: 'write' steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + with: + go-version-file: cli/go.mod + cache-dependency-path: cli/go.sum - - uses: google-github-actions/setup-gcloud@master + - id: 'auth' + name: 'Authenticate to Google Cloud' + uses: 'google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f' # v2.1.7 with: - version: "286.0.0" - service_account_key: ${{ secrets.GCP_SA_KEY }} + workload_identity_provider: '${{ secrets.GCP_WIF_PROVIDER }}' + service_account: '${{ secrets.GCP_WIF_SA_EMAIL }}' + + - uses: google-github-actions/setup-gcloud@6189d56e4096ee891640bb02ac264be376592d6a # v2.1.2 + with: + version: "410.0.0" + + # used by make release which runs gcloud alpha storage + - name: Install gcloud alpha commands + run: gcloud components install alpha - name: Retrieve last released version run: |- @@ -35,7 +55,7 @@ jobs: chmod +x bin/* - name: Archive build artifacts - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0 with: name: bin path: cli/bin @@ -55,18 +75,18 @@ jobs: - name: Create Release if: env.LAST_VERSION != env.CURRENT_VERSION id: create_release - uses: actions/create-release@v1 + uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: - tag_name: ${{ env.CURRENT_VERSION }} + tag_name: cli/${{ env.CURRENT_VERSION 
}} release_name: CLI Release ${{ env.CURRENT_VERSION }} draft: false prerelease: false - name: Upload Linux Release if: env.LAST_VERSION != env.CURRENT_VERSION - uses: actions/upload-release-asset@v1 + uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -77,7 +97,7 @@ jobs: - name: Upload Darwin Release if: env.LAST_VERSION != env.CURRENT_VERSION - uses: actions/upload-release-asset@v1 + uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -88,7 +108,7 @@ jobs: - name: Upload Windows Release if: env.LAST_VERSION != env.CURRENT_VERSION - uses: actions/upload-release-asset@v1 + uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/release-tflint-plugin.yml b/.github/workflows/release-tflint-plugin.yml new file mode 100644 index 00000000000..9daec895a84 --- /dev/null +++ b/.github/workflows/release-tflint-plugin.yml @@ -0,0 +1,39 @@ +name: release-tflint + +on: + push: + tags: + - tflint-ruleset-blueprint/v*.*.* + +permissions: + contents: write + id-token: write + +jobs: + release: + runs-on: ubuntu-latest + defaults: + run: + working-directory: 'tflint-ruleset-blueprint' + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Set up Go + uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + with: + go-version-file: 'tflint-ruleset-blueprint/go.mod' + - run: echo "GORELEASER_CURRENT_TAG=${GITHUB_REF#refs/tags/tflint-ruleset-blueprint/}" >> $GITHUB_ENV + - run: echo "${{env.GORELEASER_CURRENT_TAG}}" + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@9ed2f89a662bf1735a48bc8557fd212fa902bebf # v6.1.0 + with: + version: latest + args: release --clean --skip=validate,publish + workdir: tflint-ruleset-blueprint 
+ env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: upload + run: | + gh release upload "tflint-ruleset-blueprint/${{env.GORELEASER_CURRENT_TAG}}" dist/tflint-ruleset-blueprint_*.zip dist/checksums.txt --repo ${{ github.repository }} --clobber + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 00000000000..f38f82e9262 --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,72 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '18 1 * * 1' + push: + branches: [ "master" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. 
Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + with: + sarif_file: results.sarif diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 00000000000..3d682a38a17 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,34 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +name: "Close stale issues" +on: + schedule: + - cron: "0 23 * * *" + +permissions: + issues: write + pull-requests: write + +jobs: + stale: + if: github.repository_owner == 'GoogleCloudPlatform' + runs-on: ubuntu-latest + steps: + - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 7 days' + stale-pr-message: 'This PR is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 7 days' + exempt-issue-labels: triaged diff --git a/.github/workflows/test-cft-devtools.yml b/.github/workflows/test-cft-devtools.yml new file mode 100644 index 00000000000..33f676ba9b4 --- /dev/null +++ b/.github/workflows/test-cft-devtools.yml @@ -0,0 +1,24 @@ +name: Test devtools image build +on: + pull_request: + branches: + - "master" + paths: + - "infra/build/**" + - ".github/workflows/test-cft-devtools.yml" + +concurrency: + group: '${{ github.workflow }}-${{ github.head_ref || github.ref }}' + cancel-in-progress: true + +jobs: + build-dev-tools: + name: Build CFT dev tools image + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Build + run: |- + cd infra/build && make build-image-developer-tools diff --git a/.github/workflows/test-cli.yml b/.github/workflows/test-cli.yml new file mode 100644 index 00000000000..88723d2ba5f --- /dev/null +++ b/.github/workflows/test-cli.yml @@ -0,0 +1,40 @@ +name: CFT CLI Tests + +on: + push: + branches: + - 'master' + paths: + - 'cli/**' + - '.github/workflows/test-cli.yml' + pull_request: + branches: + - 'master' + paths: + - 'cli/**' + - '.github/workflows/test-cli.yml' + +concurrency: + group: 
'${{github.workflow}}-${{ github.head_ref || github.ref }}' + cancel-in-progress: true + +jobs: + unit: + name: ${{ matrix.operating-system }} unit tests + runs-on: ${{ matrix.operating-system }} + defaults: + run: + shell: bash + working-directory: 'cli' + strategy: + fail-fast: false + matrix: + operating-system: [ubuntu-latest] + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + with: + go-version-file: cli/go.mod + cache-dependency-path: cli/go.sum + - run: |- + make test diff --git a/.github/workflows/update-tooling.yml b/.github/workflows/update-tooling.yml index bd41cfe492c..f4f7f303b85 100644 --- a/.github/workflows/update-tooling.yml +++ b/.github/workflows/update-tooling.yml @@ -2,66 +2,120 @@ name: Update Tooling on: schedule: - cron: "0 2 * * *" + workflow_dispatch: + release: + types: [released] + push: + branches: + - "master" + env: - TF_URL: "https://api.github.com/repos/hashicorp/terraform/releases/latest" - GCLOUD_URL: "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz" + TERRAFORM_URL: "https://api.github.com/repos/hashicorp/terraform/releases/latest" + CLOUD_SDK_URL: "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz" + KPT_URL: "https://api.github.com/repos/kptdev/kpt/releases" + CFT_CLI_URL: "https://api.github.com/repos/GoogleCloudPlatform/cloud-foundation-toolkit/releases" + MODULE_SWAPPER_URL: "https://api.github.com/repos/GoogleCloudPlatform/cloud-foundation-toolkit/releases" + TFLINT_BP_PLUGIN_URL: "https://api.github.com/repos/GoogleCloudPlatform/cloud-foundation-toolkit/releases" + GATOR_MINOR: "3.17" + GATOR_URL: "https://api.github.com/repos/open-policy-agent/gatekeeper/releases" + GCRANE_URL: "https://api.github.com/repos/google/go-containerregistry/releases/latest" + KUSTOMIZE_URL: "https://api.github.com/repos/kubernetes-sigs/kustomize/releases" + TERRAGRUNT_URL: 
"https://api.github.com/repos/gruntwork-io/terragrunt/releases/latest" + TFLINT_URL: "https://api.github.com/repos/terraform-linters/tflint/releases/latest" + GOLANGCI_URL: "https://api.github.com/repos/golangci/golangci-lint/releases/latest" jobs: update-tools: runs-on: ubuntu-latest + if: github.repository == 'GoogleCloudPlatform/cloud-foundation-toolkit' + steps: - - uses: actions/checkout@v2 - - name: Update Terraform Version + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + ref: master + - name: Update Tools run: | - CURRENT_TERRAFORM=$(cat infra/build/Makefile | grep 'TERRAFORM_VERSION :=' | awk -F" " '{print $3}') - LATEST_TERRAFORM=$(curl -s ${{env.TF_URL}} | jq --raw-output .tag_name | tr -d "v") + PR_UPDATE_BODY="" + newline=$'\n' + tools=("TERRAFORM" "CLOUD_SDK" "CFT_CLI" "GATOR" "GCRANE" "KPT" "MODULE_SWAPPER" "KUSTOMIZE" "TERRAGRUNT" "TFLINT" "GOLANGCI" "TFLINT_BP_PLUGIN") - if [ "$CURRENT_TERRAFORM" == "$LATEST_TERRAFORM" ]; then - echo "Terraform is latest" - echo "LATEST_TERRAFORM=is latest" >> $GITHUB_ENV - else - echo "Terraform needs to be updated to ${LATEST_TERRAFORM}" - sed -i "s/TERRAFORM_VERSION := ${CURRENT_TERRAFORM}/TERRAFORM_VERSION := ${LATEST_TERRAFORM}/g" infra/build/Makefile - echo "LATEST_TERRAFORM=${LATEST_TERRAFORM}" >> $GITHUB_ENV - fi - - name: Update gCloud SDK Version - run: | - CURRENT_GCLOUD=$(cat infra/build/Makefile | grep 'CLOUD_SDK_VERSION :=' | awk -F" " '{print $3}') - LATEST_GCLOUD=$(curl -s ${{env.GCLOUD_URL}} | tar --to-stdout -xzf - google-cloud-sdk/VERSION) + for tool in ${tools[@]} + do + # get current tool version from Makefile + CURRENT_TOOL_VERSION=$(cat infra/build/Makefile | grep "${tool}_VERSION :=" | awk -F" " '{print $3}') + TOOL_URL=$tool\_URL + + # get latest tool version from URL + if [ "$tool" == "CLOUD_SDK" ]; then + LATEST_TOOL_VERSION=$(curl -s ${!TOOL_URL} | tar --to-stdout -xzf - google-cloud-sdk/VERSION) + elif [ "$tool" == "KPT" ]; then + # get latest 
release including pre-releases for kpt, excluding releases of porch + LATEST_TOOL_VERSION=$(curl -s ${!TOOL_URL} | jq --raw-output '[ .[] | select( .tag_name | contains("porch/") | not )][0].tag_name' | tr -d "v") + elif [ "$tool" == "CFT_CLI" ]; then + # get latest CFT_CLI release + LATEST_TOOL_VERSION=$(curl -s ${!TOOL_URL} | jq --raw-output '[ .[] | select( .name | contains("CLI Release"))][0].tag_name' | tr -d "cli/v") + elif [ "$tool" == "MODULE_SWAPPER" ]; then + # get latest MODULE_SWAPPER release + LATEST_TOOL_VERSION=$(curl -s ${!TOOL_URL} | jq --raw-output '[ .[] | select( .name | contains("infra/module-swapper"))][0].tag_name' | tr -d "infra/module\-swapper/v") + elif [ "$tool" == "TFLINT_BP_PLUGIN" ]; then + # get latest TFLINT_BP_PLUGIN release + LATEST_TOOL_VERSION=$(curl -s ${!TOOL_URL} | jq --raw-output '[ .[] | select( .name | contains("tflint-ruleset-blueprint"))][0].tag_name' | tr -d "tflint\-ruleset\-blueprint/v") + elif [ "$tool" == "GATOR" ]; then + # get latest GATOR_MINOR release + LATEST_TOOL_VERSION=$(curl -s ${!TOOL_URL} | jq --raw-output '[ .[] | select( .name | contains("'${GATOR_MINOR}'"))][0].tag_name' | tr -d "v") + elif [ "$tool" == "KUSTOMIZE" ]; then + # get latest KUSTOMIZE release + LATEST_TOOL_VERSION=$(curl -s ${!TOOL_URL} | jq --raw-output '[ .[] | select( .name | contains("kustomize"))][0].tag_name' | tr -d "kustomize/v") + else + LATEST_TOOL_VERSION=$(curl -s ${!TOOL_URL} | jq --raw-output .tag_name | tr -d "v") + fi + echo "Current ${tool} version: ${CURRENT_TOOL_VERSION}" + echo "Latest ${tool} version: ${LATEST_TOOL_VERSION} via ${!TOOL_URL}" + + # update tool version in Makefile if not latest + if [ "$CURRENT_TOOL_VERSION" == "$LATEST_TOOL_VERSION" ]; then + echo "${tool} is latest" + elif [[ -z "$CURRENT_TOOL_VERSION" || "$CURRENT_TOOL_VERSION" == "" || "$CURRENT_TOOL_VERSION" == "null" ]]; then + echo "${tool} version is missing" + else + echo "${tool} needs to be updated to ${LATEST_TOOL_VERSION}" + sed -i 
"s/${tool}_VERSION := ${CURRENT_TOOL_VERSION}/${tool}_VERSION := ${LATEST_TOOL_VERSION}/g" infra/build/Makefile + echo "LATEST_${tool}=${LATEST_TOOL_VERSION}" >> $GITHUB_ENV + echo "BUMP_IMG=true" >> $GITHUB_ENV + PR_UPDATE_BODY="$PR_UPDATE_BODY Updating ${tool} from ${CURRENT_TOOL_VERSION} to ${LATEST_TOOL_VERSION} ${newline}" + fi + done - if [ "$CURRENT_GCLOUD" == "$LATEST_GCLOUD" ]; then - echo "gcloud sdk is latest" - echo "LATEST_GCLOUD=is latest" >> $GITHUB_ENV - else - echo "gcloud sdk needs to be updated to ${LATEST_GCLOUD}" - sed -i "s/CLOUD_SDK_VERSION := ${CURRENT_GCLOUD}/CLOUD_SDK_VERSION := ${LATEST_GCLOUD}/g" infra/build/Makefile - echo "LATEST_GCLOUD=${LATEST_GCLOUD}" >> $GITHUB_ENV - fi + # Create multiline PR body text + echo "PR_UPDATE_BODY<> $GITHUB_ENV + echo "$PR_UPDATE_BODY" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV - name: Bump image patch version - if: env.LATEST_TERRAFORM != 'is latest' || env.LATEST_GCLOUD != 'is latest' + if: env.BUMP_IMG == 'true' run: | CURRENT_IMG_VERSION=$(cat infra/build/Makefile | grep 'DOCKER_TAG_VERSION_DEVELOPER_TOOLS :=' | awk -F" " '{print $3}') NEW_IMG_VERSION=$(echo $CURRENT_IMG_VERSION | awk -F. 
'{$NF+=1; print $0}' OFS=".") sed -i "s/DOCKER_TAG_VERSION_DEVELOPER_TOOLS := ${CURRENT_IMG_VERSION}/DOCKER_TAG_VERSION_DEVELOPER_TOOLS := ${NEW_IMG_VERSION}/g" infra/build/Makefile echo "NEW_IMG_VERSION=${NEW_IMG_VERSION}" >> $GITHUB_ENV - name: Commit Makefile - if: env.LATEST_TERRAFORM != 'is latest' || env.LATEST_GCLOUD != 'is latest' + if: env.BUMP_IMG == 'true' run: | git config user.name 'Cloud Foundation Bot' git config user.email 'cloud-foundation-bot@google.com' git add infra/build/Makefile - git diff-index --quiet HEAD || git commit -m "Update tools to ${{env.NEW_IMG_VERSION}} - Terraform ${{env.LATEST_TERRAFORM}} & gCloud ${{env.LATEST_GCLOUD}}" + git diff-index --quiet HEAD || git commit -m "chore: Update Tools to ${{env.NEW_IMG_VERSION}}" - name: Create Pull Request - if: env.LATEST_TERRAFORM != 'is latest' || env.LATEST_GCLOUD != 'is latest' - uses: peter-evans/create-pull-request@v2 + if: env.BUMP_IMG == 'true' + uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f # v7.0.6 with: token: ${{ secrets.CFT_ROBOT_PAT }} - commit-message: Update tools to ${{env.NEW_IMG_VERSION}} - Terraform ${{env.LATEST_TERRAFORM}} & gCloud ${{env.LATEST_GCLOUD}} + commit-message: Update tools to ${{env.NEW_IMG_VERSION}} committer: "Update Tooling Action " - title: "Update Tools to ${{env.NEW_IMG_VERSION}} - Terraform ${{env.LATEST_TERRAFORM}} & gCloud ${{env.LATEST_GCLOUD}}" + title: "chore: Update Tools to ${{env.NEW_IMG_VERSION}}" body: | - Updated tools to ${{env.NEW_IMG_VERSION}} Terraform ${{env.LATEST_TERRAFORM}} & gCloud ${{env.LATEST_GCLOUD}} + Updated tools to ${{env.NEW_IMG_VERSION}} + ${{env.PR_UPDATE_BODY}} labels: automated pr - assignees: bharathkkb,morgante - reviewers: morgante + reviewers: bharathkkb, apeabody branch: create-pull-request/patch-tools-version + base: master diff --git a/.gitignore b/.gitignore index 5dc34e78b8b..a75c1fbb7eb 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,8 @@ *.iml .idea credentials.json 
-docs/meta/env -config-connector/tests/testcases/environments.yaml .DS_Store -.vscode \ No newline at end of file +.vscode +*.pyc +cli/bpmetadata/int-test/.working +go.work.sum diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..6263cdabdaf --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,18 @@ +exclude: | + (?x)^( + reports/.*| + cli/testdata/.*| + cli/bptest/.*| + infra/blueprint-test/.*.json + )$ +repos: + - repo: https://github.com/renovatebot/pre-commit-hooks + rev: 39.83.1 + hooks: + - id: renovate-config-validator + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace diff --git a/.release-please-manifest.json b/.release-please-manifest.json new file mode 100644 index 00000000000..bc88458ec2f --- /dev/null +++ b/.release-please-manifest.json @@ -0,0 +1,5 @@ +{ + "infra/blueprint-test": "0.17.4", + "infra/module-swapper": "0.4.11", + "tflint-ruleset-blueprint": "0.2.7" +} diff --git a/CODEOWNERS b/CODEOWNERS index a09fc9fb703..c4c134c8739 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,11 +1,4 @@ # Main team administering overall repo -* @morgante @bharathkkb @ocsig - -# Deployment Manager Reviewers -/dm/ @morgante @ocsig @sjvanrossum - -# CLI owners -/cli/ @morgante @bharathkkb - -# Config Connector team -/config-connector/ @morgante @ocsig @AlexBulankou @maqiuyujoyce +* @bharathkkb @g-awmalik @apeabody +# ADC team reviewers +/cli/ @q2w @GoogleCloudPlatform/blueprint-solutions diff --git a/LICENSE b/LICENSE index 7a4a3ea2424..d6456956733 100644 --- a/LICENSE +++ b/LICENSE @@ -199,4 +199,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. 
diff --git a/README.md b/README.md index c289571dd0c..1480791cecb 100644 --- a/README.md +++ b/README.md @@ -4,21 +4,12 @@ The Cloud Foundation toolkit (henceforth, CFT) includes the following parts: -- A comprehensive set of production-ready resource templates that follow - Google's best practices, which can be used with the CFT or the gcloud - utility (part of the Google Cloud SDK) - see - [the template directory](dm/templates/) -- A command-line interface (henceforth, CLI) that deploys resources defined in - single or multiple CFT-compliant config files - see: - - The CFT source Python files (the `src/` directory) - - The [CFT DM User Guide](dm/docs/userguide.md) -- A growing set of sample [Config - Connector](https://cloud.google.com/config-connector/docs/overview) - YAML configurations - see [the solutions directory](config-connector/solutions/) +- A comprehensive set of production-ready Terraform blueprints that follow + Google's best practices. See https://cloud.google.com/docs/terraform/blueprints/terraform-blueprints for a complete list. +- A command-line interface (henceforth, CLI) that provides developer tooling for creating and managing Terraform blueprints. +- For the latest Config Connector blueprints, refer to https://github.com/GoogleCloudPlatform/blueprints. Older DM and Config Connector templates can be found in [previous releases](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/tree/v0.5.2). -In addition, the CFT repository includes a sample pipeline that enables running -CFT deployment operations from Jenkins - see the -[pipeline directory](dm/pipeline/README.md). +You can see a comparison between CFT and Fabric [here](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/blob/master/FABRIC-AND-CFT.md). 
## License diff --git a/cli/Makefile b/cli/Makefile index 549d96f5a3b..8568f79a8e9 100644 --- a/cli/Makefile +++ b/cli/Makefile @@ -1,25 +1,45 @@ SHELL := /bin/bash # Changing this value will trigger a new release -VERSION=v0.4.1 +VERSION=v1.5.12 BINARY=bin/cft GITHUB_REPO=github.com/GoogleCloudPlatform/cloud-foundation-toolkit PLATFORMS := linux windows darwin BUILD_DIR=./bin NAME=cft BUCKET=gs://cft-cli +INT_TEST_DIR=./bpmetadata/int-test +SCHEMA_DIR=./bpmetadata/schema +SRC_PROTO_DIR=./bpmetadata/proto +PROTOC_DIR=./bpmetadata +DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 1.23 +DOCKER_IMAGE_DEVELOPER_TOOLS := cft/developer-tools +REGISTRY_URL := gcr.io/cloud-foundation-cicd +GOLANGCI_VERSION := 1.63.3 # Setup the -ldflags option for go build here, interpolate the variable values LDFLAGS=-ldflags "-X $(GITHUB_REPO)/cli/cmd.Version=$(VERSION)" .PHONY: build -build: +build: protoc-gen build-schema go build ${LDFLAGS} -o ${BUILD_DIR}/${NAME} +.PHONY: protoc-gen +protoc-gen: + docker run --rm \ + -v "$(CURDIR)":/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /bin/bash -c "protoc -I=${SRC_PROTO_DIR} --go_opt=paths=source_relative --go_out=${PROTOC_DIR} ${SRC_PROTO_DIR}/*.proto && \ + protoc-go-inject-tag -input='${PROTOC_DIR}/*.pb.go'" + +.PHONY: build-schema +build-schema: + go run ./${SCHEMA_DIR} -output=${SCHEMA_DIR} + .PHONY: publish publish: - gsutil cp "${BUILD_DIR}/*" "${BUCKET}/${VERSION}" - gsutil cp "${BUILD_DIR}/*" "${BUCKET}/latest" + gcloud alpha storage cp "${BUILD_DIR}/*" "${BUCKET}/${VERSION}" + gcloud alpha storage cp "${BUILD_DIR}/*" "${BUCKET}/latest" .PHONY: release release: $(PLATFORMS) @@ -28,6 +48,17 @@ release: $(PLATFORMS) $(PLATFORMS): GO111MODULE=on GOOS=$@ GOARCH=amd64 CGO_ENABLED=0 go build ${LDFLAGS} -o "${BUILD_DIR}/${NAME}-$@-amd64" -.PHONY: test -test: +.PHONY: int_test +int_test: + ${INT_TEST_DIR}/workflow.sh ${INT_TEST_DIR} + +.PHONY: go_test +go_test: build go test ./... 
+ +.PHONY: test +test: build go_test int_test + +.PHONY: docker_go_lint +docker_go_lint: + docker run --rm -v $(PWD):/cli -w /cli golangci/golangci-lint:v${GOLANGCI_VERSION} golangci-lint --timeout=5m -v run diff --git a/cli/README.md b/cli/README.md index a9118c3665f..85bf7463b87 100644 --- a/cli/README.md +++ b/cli/README.md @@ -44,4 +44,4 @@ After build find binary at bin/cft location ## License -Apache 2.0 - See [LICENSE](LICENSE) for more information. \ No newline at end of file +Apache 2.0 - See [LICENSE](LICENSE) for more information. diff --git a/cli/bin/.gitignore b/cli/bin/.gitignore index c96a04f008e..d6b7ef32c84 100644 --- a/cli/bin/.gitignore +++ b/cli/bin/.gitignore @@ -1,2 +1,2 @@ * -!.gitignore \ No newline at end of file +!.gitignore diff --git a/cli/bpbuild/build.go b/cli/bpbuild/build.go new file mode 100644 index 00000000000..1e6b7ae22d5 --- /dev/null +++ b/cli/bpbuild/build.go @@ -0,0 +1,151 @@ +package bpbuild + +import ( + "context" + "fmt" + "os" + "time" + + cloudbuild "google.golang.org/api/cloudbuild/v1" + "gopkg.in/yaml.v3" +) + +const ( + successStatus = "SUCCESS" + failedStatus = "FAILURE" +) + +// getCBBuildsWithFilter returns a list of cloudbuild builds in projectID with a given filter. +// Additional client side filters can be specified via cFilters. 
+// TODO(bharathkkb): move https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/tree/master/infra/utils/fbf into CLI +func getCBBuildsWithFilter(projectID string, filter string, cFilters []clientBuildFilter) ([]*cloudbuild.Build, error) { + ctx := context.Background() + cloudbuildService, err := cloudbuild.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("error creating cloudbuild service: %w", err) + } + + c, err := cloudbuildService.Projects.Builds.List(projectID).Filter(filter).Do() + if err != nil { + return nil, fmt.Errorf("error listing builds with filter %s in project %s: %w", filter, projectID, err) + } + + cbBuilds := []*cloudbuild.Build{} + appendClientFilteredBuilds := func(builds []*cloudbuild.Build) { + for _, b := range builds { + appendBuild := true + for _, cFilter := range cFilters { + // skip if any client side filter evaluates to false + if !cFilter(b) { + appendBuild = false + break + } + } + if appendBuild { + cbBuilds = append(cbBuilds, b) + } + } + } + + if len(c.Builds) < 1 { + return nil, fmt.Errorf("no builds found with filter %s in project %s", filter, projectID) + } + appendClientFilteredBuilds(c.Builds) + + // pagination + for { + c, err = cloudbuildService.Projects.Builds.List(projectID).Filter(filter).PageToken(c.NextPageToken).Do() + if err != nil { + return nil, fmt.Errorf("error retrieving next page with token %s: %w", c.NextPageToken, err) + } + appendClientFilteredBuilds(c.Builds) + if c.NextPageToken == "" { + break + } + } + return cbBuilds, nil +} + +// clientside filter functions +type clientBuildFilter func(*cloudbuild.Build) bool + +// filterRealBuilds filters out builds not triggered from source repos (i.e by automation). 
+func filterRealBuilds(b *cloudbuild.Build) bool { + for _, subs := range []string{"COMMIT_SHA", "REPO_NAME", "TRIGGER_NAME"} { + _, substExists := b.Substitutions[subs] + if !substExists { + return false + } + } + return true +} + +// filterGHRepoBuilds filters builds from a particular repo name. +// TODO:(bharathkkb): We should ideally be using a sever side filter for this https://cloud.google.com/build/docs/view-build-results#filtering_build_results_using_queries +// but I was not able to figure out expected format for GH URLs. +func filterGHRepoBuilds(repo string) clientBuildFilter { + return func(b *cloudbuild.Build) bool { + name, exists := b.Substitutions["REPO_NAME"] + if !exists { + return false + } + return name == repo + } +} + +// successBuildsBtwFilterExpr returns a CEL expression as string +// for finding all successful builds between start and end time. +func successBuildsBtwFilterExpr(start, end time.Time) string { + return fmt.Sprintf( + "create_time>=\"%s\" AND create_time<\"%s\" AND status=\"%s\"", + start.Format(time.RFC3339), + end.Format(time.RFC3339), + successStatus) +} + +// getBuildFromFile unmarshalls a CloudBuild file at path. +func getBuildFromFile(path string) (*cloudbuild.Build, error) { + content, err := os.ReadFile(path) + if err != nil { + return nil, err + } + var b cloudbuild.Build + err = yaml.Unmarshal(content, &b) + if err != nil { + return nil, err + } + return &b, nil +} + +// getBuildStepIDs retrieves a slice of build step IDs in a build. +func getBuildStepIDs(b *cloudbuild.Build) []string { + steps := []string{} + for _, bs := range b.Steps { + steps = append(steps, bs.Id) + } + return steps +} + +// findBuildStageDurations computes duration for a given build stage across a slice of builds +// if and only if stage is successful. 
+func findBuildStageDurations(stepId string, builds []*cloudbuild.Build) ([]time.Duration, error) { + durations := []time.Duration{} + for _, b := range builds { + for _, bs := range b.Steps { + if bs.Id != stepId || bs.Status != successStatus { + continue + } + + parsedStartTime, err := time.Parse(time.RFC3339Nano, bs.Timing.StartTime) + if err != nil { + return []time.Duration{}, err + } + parsedEndTime, err := time.Parse(time.RFC3339Nano, bs.Timing.EndTime) + if err != nil { + return []time.Duration{}, err + } + durations = append(durations, parsedEndTime.Sub(parsedStartTime).Truncate(time.Second)) + } + } + return durations, nil +} diff --git a/cli/bpbuild/build_test.go b/cli/bpbuild/build_test.go new file mode 100644 index 00000000000..27408ef8c30 --- /dev/null +++ b/cli/bpbuild/build_test.go @@ -0,0 +1,224 @@ +package bpbuild + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + cloudbuild "google.golang.org/api/cloudbuild/v1" +) + +func TestFilterRealBuilds(t *testing.T) { + tests := []struct { + name string + subst map[string]string + want bool + }{ + { + name: "fail", + subst: map[string]string{"foo": "bar"}, + want: false, + }, + { + name: "partial", + subst: map[string]string{"REPO_NAME": "bar"}, + want: false, + }, + { + name: "pass", + subst: map[string]string{ + "REPO_NAME": "bar", + "COMMIT_SHA": "bar", + "TRIGGER_NAME": "bar", + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := newTestBuild(tt.subst, nil) + if got := filterRealBuilds(b); got != tt.want { + t.Errorf("filterRealBuilds() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestFilterGHRepoBuilds(t *testing.T) { + tests := []struct { + name string + repo string + subst map[string]string + want bool + }{ + { + name: "fail", + repo: "foo", + subst: map[string]string{"foo": "bar"}, + want: false, + }, + { + name: "pass", + repo: "foo", + subst: map[string]string{"REPO_NAME": "foo"}, + want: true, + }, + { + name: 
"fail different", + repo: "bar", + subst: map[string]string{"REPO_NAME": "foo"}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := newTestBuild(tt.subst, nil) + if got := filterGHRepoBuilds(tt.repo)(b); got != tt.want { + t.Errorf("filterGHRepoBuilds() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestFindBuildStageDurations(t *testing.T) { + tests := []struct { + name string + stepId string + builds []*cloudbuild.Build + want []time.Duration + wantErr bool + }{ + { + name: "simple", + stepId: "foo", + builds: []*cloudbuild.Build{newTestBuild(nil, []*cloudbuild.BuildStep{newTestBuildStep("foo", time.Hour, successStatus)})}, + want: []time.Duration{time.Hour}, + }, + { + name: "multiple builds", + stepId: "foo", + builds: []*cloudbuild.Build{ + newTestBuild(nil, []*cloudbuild.BuildStep{newTestBuildStep("foo", time.Hour, successStatus)}), + newTestBuild(nil, []*cloudbuild.BuildStep{newTestBuildStep("foo", time.Hour*2, successStatus)}), + newTestBuild(nil, []*cloudbuild.BuildStep{newTestBuildStep("foo", time.Hour*4, successStatus)}), + }, + want: []time.Duration{ + time.Hour, + time.Hour * 2, + time.Hour * 4, + }, + }, + { + name: "multiple builds multiple steps", + stepId: "foo", + builds: []*cloudbuild.Build{ + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour, successStatus), + newTestBuildStep("bar", time.Hour*2, successStatus), + }), + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour*2, successStatus), + newTestBuildStep("bar", time.Hour*8, successStatus), + }), + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour*4, successStatus), + newTestBuildStep("bar", time.Hour, successStatus), + }), + }, + want: []time.Duration{ + time.Hour, + time.Hour * 2, + time.Hour * 4, + }, + }, + { + name: "multiple builds multiple steps with fails", + stepId: "foo", + builds: []*cloudbuild.Build{ + newTestBuild(nil, []*cloudbuild.BuildStep{ 
+ newTestBuildStep("foo", time.Hour, failedStatus), + newTestBuildStep("bar", time.Hour*2, successStatus), + }), + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour*2, successStatus), + newTestBuildStep("bar", time.Hour*8, successStatus), + }), + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour*4, failedStatus), + newTestBuildStep("bar", time.Hour, successStatus), + }), + }, + want: []time.Duration{ + time.Hour * 2, + }, + }, + { + name: "empty multiple builds multiple steps but all matched step failed", + stepId: "foo", + builds: []*cloudbuild.Build{ + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour, failedStatus), + newTestBuildStep("bar", time.Hour*2, successStatus), + }), + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour*2, failedStatus), + newTestBuildStep("bar", time.Hour*8, successStatus), + }), + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour*4, failedStatus), + newTestBuildStep("bar", time.Hour, successStatus), + }), + }, + want: []time.Duration{}, + }, + { + name: "empty multiple builds multiple steps no match", + stepId: "baz", + builds: []*cloudbuild.Build{ + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour, successStatus), + newTestBuildStep("bar", time.Hour*2, successStatus), + }), + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour*2, successStatus), + newTestBuildStep("bar", time.Hour*8, successStatus), + }), + newTestBuild(nil, []*cloudbuild.BuildStep{ + newTestBuildStep("foo", time.Hour*4, successStatus), + newTestBuildStep("bar", time.Hour, successStatus), + }), + }, + want: []time.Duration{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := findBuildStageDurations(tt.stepId, tt.builds) + if (err != nil) != tt.wantErr { + t.Errorf("findBuildStageDurations() error = %v, wantErr %v", err, tt.wantErr) 
+ return + } + if !assert.Equal(t, got, tt.want) { + t.Errorf("findBuildStageDurations() = %v, want %v", got, tt.want) + } + }) + } +} + +func newTestBuild(subst map[string]string, bs []*cloudbuild.BuildStep) *cloudbuild.Build { + return &cloudbuild.Build{ + Substitutions: subst, + Steps: bs, + } +} + +func newTestBuildStep(id string, length time.Duration, status string) *cloudbuild.BuildStep { + return &cloudbuild.BuildStep{ + Id: id, + Status: status, + Timing: &cloudbuild.TimeSpan{ + StartTime: time.Now().Format(time.RFC3339Nano), + EndTime: time.Now().Add(length).Format(time.RFC3339Nano), + }, + } +} diff --git a/cli/bpbuild/cmd.go b/cli/bpbuild/cmd.go new file mode 100644 index 00000000000..0d241daa9ed --- /dev/null +++ b/cli/bpbuild/cmd.go @@ -0,0 +1,143 @@ +package bpbuild + +import ( + "fmt" + "os" + "time" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/util" + "github.com/fatih/color" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var avgTimeFlags struct { + projectId string + repoName string + buildFilePath string + buildStepID string + lookUpStart string + lookUpStartTime time.Time + lookUpEnd string + lookUpEndTime time.Time +} + +const defaultBuildFilePath = "build/int.cloudbuild.yaml" + +func init() { + viper.AutomaticEnv() + Cmd.AddCommand(avgTimeCmd) + + avgTimeCmd.Flags().StringVar(&avgTimeFlags.buildFilePath, "build-file", defaultBuildFilePath, "Path to file containing CloudBuild configs.") + avgTimeCmd.Flags().StringVar(&avgTimeFlags.buildStepID, "step", "", "ID of build step to compute avg.") + avgTimeCmd.Flags().StringVar(&avgTimeFlags.lookUpStart, "start-time", "", "Time to start computing build step avg in form MM-DD-YYYY. Defaults to one month ago.") + avgTimeCmd.Flags().StringVar(&avgTimeFlags.lookUpEnd, "end-time", "", "Time to stop computing build step avg in form MM-DD-YYYY. 
Defaults to current date.") + avgTimeCmd.Flags().StringVar(&avgTimeFlags.projectId, "project-id", "cloud-foundation-cicd", "Project ID where builds are executed.") + avgTimeCmd.Flags().StringVar(&avgTimeFlags.repoName, "repo", "", "Name of repo that triggered the builds. Defaults to extracting from git config.") +} + +var Cmd = &cobra.Command{ + Use: "builds", + Short: "Blueprint builds", + Long: `Blueprint builds CLI is used to get information about blueprint builds.`, + Args: cobra.NoArgs, +} + +var avgTimeCmd = &cobra.Command{ + Use: "avgtime", + Short: "average time for build step", + Long: `Compute average time for a given build step across build executions from a given start-time to end-time.`, + Args: cobra.NoArgs, + RunE: calcAvgTime, +} + +func calcAvgTime(cmd *cobra.Command, args []string) error { + // set any computed defaults + if err := setAvgTimeFlagDefaults(); err != nil { + return err + } + + // build filters + filterExpr := successBuildsBtwFilterExpr(avgTimeFlags.lookUpStartTime, avgTimeFlags.lookUpEndTime) + cFilters := []clientBuildFilter{ + filterRealBuilds, + filterGHRepoBuilds(avgTimeFlags.repoName), + } + + // get builds and compute avg + builds, err := getCBBuildsWithFilter(avgTimeFlags.projectId, filterExpr, cFilters) + if err != nil { + return fmt.Errorf("error retrieving builds: %w", err) + } + durations, err := findBuildStageDurations(avgTimeFlags.buildStepID, builds) + if err != nil { + return err + } + if len(durations) < 1 { + return fmt.Errorf("error no successful build stage %s found", avgTimeFlags.buildStepID) + } + avgTime := durationAvg(durations) + + // todo(bharathkkb): Add JSON output + fmt.Printf("Discovered %d samples for %s stage between %s and %s\n", len(durations), avgTimeFlags.buildStepID, avgTimeFlags.lookUpStart, avgTimeFlags.lookUpEnd) + color.Green("Computed average time: %s", avgTime) + return nil +} + +// setAvgTimeFlagDefaults sets computed defaults for any missing flags. 
+// An error is thrown if a default cannot be computed. +func setAvgTimeFlagDefaults() error { + // if no explicit repo name specified via flag, try to auto discover + if avgTimeFlags.repoName == "" { + Log.Info("No repo specified, attempting to detect repo name from current dir") + path, err := os.Getwd() + if err != nil { + return fmt.Errorf("error getting working dir: %w", err) + } + r, err := getRepoName(path) + if err != nil { + return fmt.Errorf("error finding repo name: %w", err) + } + if r == "" { + return fmt.Errorf("unable to detect repo name, please specify a name using --repo") + } + avgTimeFlags.repoName = r + Log.Info("Found repo", "default", avgTimeFlags.repoName) + } + + // if no explicit build step specified via flag, prompt user with possible options from CloudBuild configs. + if avgTimeFlags.buildStepID == "" { + Log.Info("No build ID specified, attempting to find and prompt for build step ID from build file.") + buildFile, err := getBuildFromFile(avgTimeFlags.buildFilePath) + if err != nil { + return fmt.Errorf("error finding build file: %w", err) + } + steps := getBuildStepIDs(buildFile) + avgTimeFlags.buildStepID = util.PromptSelect("Select build step to compute average", steps) + } + + // if no explicit start time, default to starting computation from one month ago. + if avgTimeFlags.lookUpStart == "" { + avgTimeFlags.lookUpStart = time.Now().AddDate(0, -1, 0).Format("01-02-2006") + Log.Info("No start time specified.", "default", avgTimeFlags.lookUpStart) + } + + startTime, err := getTimeFromStr(avgTimeFlags.lookUpStart) + if err != nil { + return fmt.Errorf("error converting %s to time: %w", avgTimeFlags.lookUpStart, err) + } + avgTimeFlags.lookUpStartTime = startTime + + // if no explicit end time, default to ending computation to now. 
+ if avgTimeFlags.lookUpEnd == "" { + avgTimeFlags.lookUpEnd = time.Now().Format("01-02-2006") + Log.Info("No end time specified.", "default", avgTimeFlags.lookUpEnd) + } + + endTime, err := getTimeFromStr(avgTimeFlags.lookUpEnd) + if err != nil { + return fmt.Errorf("error converting %s to time: %w", avgTimeFlags.lookUpEnd, err) + } + avgTimeFlags.lookUpEndTime = endTime + return nil +} diff --git a/cli/bpbuild/git.go b/cli/bpbuild/git.go new file mode 100644 index 00000000000..85543bca7b7 --- /dev/null +++ b/cli/bpbuild/git.go @@ -0,0 +1,36 @@ +package bpbuild + +import ( + "fmt" + "net/url" + "strings" + + git "github.com/go-git/go-git/v5" +) + +const defaultRemote = "origin" + +// getRepoName finds upstream repo name from a given repo directory +func getRepoName(dir string) (string, error) { + r, err := git.PlainOpen(dir) + if err != nil { + return "", fmt.Errorf("error opening git dir %s: %w", dir, err) + } + rm, err := r.Remote(defaultRemote) + if err != nil { + return "", fmt.Errorf("error finding remote %s in git dir %s: %w", defaultRemote, dir, err) + } + + // validate remote URL + remoteURL, err := url.Parse(rm.Config().URLs[0]) + if err != nil { + return "", fmt.Errorf("error parsing remote URL: %w", err) + } + trimmedRemotePath := strings.TrimSuffix(remoteURL.Path, "/") + splitRemotePath := strings.Split(trimmedRemotePath, "/") + // expect path to be /owner/repo + if len(splitRemotePath) != 3 { + return "", fmt.Errorf("expected owner/repo, got %s", trimmedRemotePath) + } + return splitRemotePath[len(splitRemotePath)-1], nil +} diff --git a/cli/bpbuild/git_test.go b/cli/bpbuild/git_test.go new file mode 100644 index 00000000000..7583f8d25fa --- /dev/null +++ b/cli/bpbuild/git_test.go @@ -0,0 +1,79 @@ +package bpbuild + +import ( + "testing" + + git "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/config" +) + +func TestGetRepoName(t *testing.T) { + tests := []struct { + name string + repo string + remote string + want string + wantErr bool + 
}{ + { + name: "simple", + repo: "https://github.com/foo/bar", + remote: defaultRemote, + want: "bar", + }, + { + name: "simple trailing", + repo: "https://gitlab.com/foo/bar/", + remote: defaultRemote, + want: "bar", + }, + { + name: "no scheme", + repo: "github.com/foo/bar", + remote: defaultRemote, + want: "bar", + }, + { + name: "invalid path", + repo: "github.com/foo/bar/baz", + remote: defaultRemote, + wantErr: true, + }, + { + name: "invalid remote", + repo: "github.com/foo/bar", + remote: "foo", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := tempGitRepoWithRemote(t, tt.repo, tt.remote) + got, err := getRepoName(dir) + if (err != nil) != tt.wantErr { + t.Errorf("getRepoName() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("getRepoName() = %v, want %v", got, tt.want) + } + }) + } +} + +func tempGitRepoWithRemote(t *testing.T, repoURL, remote string) string { + t.Helper() + dir := t.TempDir() + r, err := git.PlainInit(dir, true) + if err != nil { + t.Fatalf("Error creating git repo in tempdir: %v", err) + } + _, err = r.CreateRemote(&config.RemoteConfig{ + Name: remote, + URLs: []string{repoURL}, + }) + if err != nil { + t.Fatalf("Error creating remote in tempdir repo: %v", err) + } + return dir +} diff --git a/cli/bpbuild/main.go b/cli/bpbuild/main.go new file mode 100644 index 00000000000..7664aa27622 --- /dev/null +++ b/cli/bpbuild/main.go @@ -0,0 +1,8 @@ +package bpbuild + +import ( + log "github.com/inconshreveable/log15" +) + +// bpbuild log15 handler +var Log = log.New() diff --git a/cli/bpbuild/time.go b/cli/bpbuild/time.go new file mode 100644 index 00000000000..6829937dc22 --- /dev/null +++ b/cli/bpbuild/time.go @@ -0,0 +1,21 @@ +package bpbuild + +import "time" + +// durationAvg calculates avg for a given slice of durations. 
+func durationAvg(durations []time.Duration) time.Duration { + if len(durations) < 1 { + return time.Duration(0) + } + var total time.Duration + for _, d := range durations { + total += d + } + avg := total.Seconds() / float64(len(durations)) + return time.Duration(avg * float64(time.Second)) +} + +// getTimeFromStr parses string formatted MM-DD-YYY as time. +func getTimeFromStr(t string) (time.Time, error) { + return time.Parse("01-02-2006", t) +} diff --git a/cli/bpbuild/time_test.go b/cli/bpbuild/time_test.go new file mode 100644 index 00000000000..7934f4491d9 --- /dev/null +++ b/cli/bpbuild/time_test.go @@ -0,0 +1,42 @@ +package bpbuild + +import ( + "testing" + "time" +) + +func TestDurationAvg(t *testing.T) { + tests := []struct { + name string + durations []time.Duration + want time.Duration + }{ + { + name: "single", + durations: []time.Duration{time.Hour}, + want: time.Hour, + }, + { + name: "multiple", + durations: []time.Duration{time.Hour, time.Hour}, + want: time.Hour, + }, + { + name: "mixed", + durations: []time.Duration{time.Hour, 2 * time.Hour, 2 * time.Hour, 3 * time.Hour}, + want: 2 * time.Hour, + }, + { + name: "empty", + durations: []time.Duration{}, + want: time.Duration(0), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := durationAvg(tt.durations); got != tt.want { + t.Errorf("durationAvg() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/cli/bpcatalog/cmd.go b/cli/bpcatalog/cmd.go new file mode 100644 index 00000000000..5fac40bb326 --- /dev/null +++ b/cli/bpcatalog/cmd.go @@ -0,0 +1,70 @@ +package bpcatalog + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var catalogListFlags struct { + format renderFormat + sort sortOption +} + +const ( + tfModulesOrg = "terraform-google-modules" + gcpOrg = "GoogleCloudPlatform" +) + +var ( + // any repos that match terraform-google-* but should not be included + repoIgnoreList = map[string]bool{ + 
"terraform-google-conversion": true, + "terraform-google-examples": true, + } + // any repos that do not match terraform-google-* but should be included + repoAllowList = map[string]bool{ + "terraform-example-foundation": true, + } +) + +func init() { + viper.AutomaticEnv() + Cmd.AddCommand(listCmd) + + listCmd.Flags().Var(&catalogListFlags.format, "format", fmt.Sprintf("Format to display catalog. Defaults to table. Options are %+v.", renderFormats)) + listCmd.Flags().Var(&catalogListFlags.sort, "sort", fmt.Sprintf("Sort results. Defaults to created date. Options are %+v.", sortOptions)) +} + +var Cmd = &cobra.Command{ + Use: "catalog", + Short: "Blueprint catalog", + Long: `Blueprint catalog is used to get information about blueprints catalog.`, + Args: cobra.NoArgs, +} + +var listCmd = &cobra.Command{ + Use: "list", + Short: "lists blueprints", + Long: `Lists blueprints in catalog`, + Args: cobra.NoArgs, + RunE: listCatalog, +} + +func listCatalog(cmd *cobra.Command, args []string) error { + // defaults + if catalogListFlags.format.Empty() { + catalogListFlags.format = renderTable + } + if catalogListFlags.sort.Empty() { + catalogListFlags.sort = sortCreated + } + gh := newGHService(withTokenClient(), withOrgs([]string{tfModulesOrg, gcpOrg})) + repos, err := fetchSortedTFRepos(gh, catalogListFlags.sort) + if err != nil { + return err + } + return render(repos, os.Stdout, catalogListFlags.format, viper.GetBool("verbose")) +} diff --git a/cli/bpcatalog/gh.go b/cli/bpcatalog/gh.go new file mode 100644 index 00000000000..75daeaf6a5c --- /dev/null +++ b/cli/bpcatalog/gh.go @@ -0,0 +1,110 @@ +package bpcatalog + +import ( + "context" + "fmt" + "net/http" + "os" + "sort" + + "github.com/google/go-github/v68/github" + "golang.org/x/oauth2" +) + +const ghTokenEnvVar = "GITHUB_TOKEN" + +type ghService struct { + client *github.Client + ctx context.Context + orgs []string +} + +type ghServiceOption func(*ghService) + +func withOrgs(orgs []string) ghServiceOption { + return 
func(g *ghService) { + g.orgs = orgs + } +} + +func withClient(c *http.Client) ghServiceOption { + return func(g *ghService) { + g.client = github.NewClient(c) + } +} + +func withTokenClient() ghServiceOption { + return func(g *ghService) { + pat, isSet := os.LookupEnv(ghTokenEnvVar) + if !isSet { + Log.Crit(fmt.Sprintf("GitHub token env var %s is not set", ghTokenEnvVar)) + os.Exit(1) + } + ts := oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: pat}, + ) + tc := oauth2.NewClient(g.ctx, ts) + g.client = github.NewClient(tc) + } +} + +func newGHService(opts ...ghServiceOption) *ghService { + ctx := context.Background() + ghs := &ghService{ + client: github.NewClient(nil), + ctx: ctx, + } + for _, opt := range opts { + opt(ghs) + } + return ghs +} + +type repos []*github.Repository + +// filter filters repos using a given filter func. +func (r repos) filter(filter func(*github.Repository) bool) repos { + var filtered []*github.Repository + for _, repo := range r { + if filter(repo) { + filtered = append(filtered, repo) + } + } + return filtered +} + +// sort sorts repos using a given sort option. +func (r repos) sort(s sortOption) (repos, error) { + switch s { + case sortCreated: + sort.SliceStable(r, func(i, j int) bool { return r[i].GetCreatedAt().Before(r[j].GetCreatedAt().Time) }) + case sortStars: + sort.SliceStable(r, func(i, j int) bool { return r[i].GetStargazersCount() < r[j].GetStargazersCount() }) + case sortName: + sort.SliceStable(r, func(i, j int) bool { return r[i].GetName() < r[j].GetName() }) + default: + return nil, fmt.Errorf("one of %+v expected. unknown format: %s", sortOptions, catalogListFlags.sort) + } + return r, nil +} + +// fetchRepos fetches all repos across multiple orgs. 
+func (g *ghService) fetchRepos() (repos, error) { + opts := &github.RepositoryListByOrgOptions{ListOptions: github.ListOptions{PerPage: 100}, Type: "public"} + var allRepos []*github.Repository + for _, org := range g.orgs { + for { + repos, resp, err := g.client.Repositories.ListByOrg(g.ctx, org, opts) + if err != nil { + return nil, err + } + allRepos = append(allRepos, repos...) + // if no next page, we have reached end of pagination + if resp.NextPage == 0 { + break + } + opts.Page = resp.NextPage + } + } + return allRepos, nil +} diff --git a/cli/bpcatalog/main.go b/cli/bpcatalog/main.go new file mode 100644 index 00000000000..e3987e7aec9 --- /dev/null +++ b/cli/bpcatalog/main.go @@ -0,0 +1,8 @@ +package bpcatalog + +import ( + log "github.com/inconshreveable/log15" +) + +// bpcatalog log15 handler +var Log = log.New() diff --git a/cli/bpcatalog/render.go b/cli/bpcatalog/render.go new file mode 100644 index 00000000000..c91c5059cf6 --- /dev/null +++ b/cli/bpcatalog/render.go @@ -0,0 +1,230 @@ +package bpcatalog + +import ( + "bytes" + "fmt" + "html/template" + "io" + "sort" + "strconv" + "strings" + + "github.com/jedib0t/go-pretty/table" +) + +// renderFormat defines the set of render options for catalog. +type renderFormat string + +func (r *renderFormat) String() string { + return string(*r) +} + +func (r *renderFormat) Empty() bool { + return r.String() == "" +} + +func (r *renderFormat) Set(v string) error { + f, err := renderFormatFromString(v) + if err != nil { + return err + } + *r = f + return nil +} + +func renderFormatFromString(s string) (renderFormat, error) { + format := renderFormat(s) + for _, stage := range renderFormats { + if format == stage { + return format, nil + } + } + return "", fmt.Errorf("one of %+v expected. 
unknown format: %s", renderFormats, s) +} + +func (r *renderFormat) Type() string { + return "renderFormat" +} + +const ( + renderTable renderFormat = "table" + renderCSV renderFormat = "csv" + renderHTML renderFormat = "html" + + renderTimeformat = "2006-01-02" + e2eLabel = "end-to-end" + htmlTemplate = ` + + + + + + + + +{{range .}}{{if .Categories}} + + + + + +{{end}}{{end}} + +
CategoryBlueprintDescription
{{.Categories}}{{.DisplayName}}{{.Description}}
` +) + +var ( + renderFormats = []renderFormat{renderTable, renderCSV, renderHTML} + + // maps GH topics to categories + // https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/29e980be9f3e3535f4b0b7314c9e1aea5ec2001f/infra/terraform/test-org/org/locals.tf#L39-L53 + topicToCategory = map[string]string{ + e2eLabel: "End-to-end", + "healthcare-life-sciences": "Healthcare and life sciences", + "serverless-computing": "Serverless computing", + "compute": "Compute", + "containers": "Containers", + "databases": "Databases", + "networking": "Networking", + "data-analytics": "Data analytics", + "storage": "Storage", + "operations": "Operations", + "developer-tools": "Developer tools", + "security-identity": "Security and identity", + "workspace": "Workspace", + } + + // static display data for docs mode + // these repos are not currently auto discovered + staticDM = []displayMeta{ + { + DisplayName: "fabric", + URL: "https://github.com/terraform-google-modules/cloud-foundation-fabric", + Categories: "End to end", + IsE2E: true, + Description: "Advanced examples designed for prototyping", + }, + { + DisplayName: "ai-notebook", + URL: "https://github.com/GoogleCloudPlatform/notebooks-blueprint-security", + Categories: "End to end, Data analytics", + IsE2E: true, + Description: "Protect confidential data in Vertex AI Workbench notebooks", + }, + } +) + +// displayMeta stores processed display metadata. +// Currently it processes from repo info but +// may also pull from other sources like blueprint meta +// in the future. +type displayMeta struct { + Name string + DisplayName string + Stars string + CreatedAt string + Description string + Labels []string + URL string + Categories string + IsE2E bool +} + +// render writes given repo information in the specified renderFormat to w. 
+func render(r repos, w io.Writer, format renderFormat, verbose bool) error {
+	dm := reposToDisplayMeta(r)
+	if format == renderHTML {
+		_, err := w.Write([]byte(renderDocHTML(append(dm, staticDM...))))
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	tbl := table.NewWriter()
+	tbl.SetOutputMirror(w)
+	h := table.Row{"Repo", "Stars", "Created"}
+	if verbose {
+		h = append(h, "Description")
+	}
+	tbl.AppendHeader(h)
+
+	for _, repo := range r {
+		row := table.Row{repo.GetName(), repo.GetStargazersCount(), repo.GetCreatedAt().Format(renderTimeformat)}
+		if verbose {
+			row = append(row, repo.GetDescription())
+		}
+		tbl.AppendRow(row)
+	}
+	switch format {
+	case renderTable:
+		tbl.Render()
+	case renderCSV:
+		tbl.RenderCSV()
+	default:
+		// Report the format argument being switched on, not the global flag.
+		return fmt.Errorf("one of %+v expected. unknown format: %s", renderFormats, format)
+	}
+	return nil
+}
+
+// reposToDisplayMeta converts repo to displayMeta.
+func reposToDisplayMeta(r repos) []displayMeta {
+	dm := make([]displayMeta, 0, len(r))
+	for _, repo := range r {
+		displayName := strings.TrimPrefix(repo.GetName(), "terraform-google-")
+		displayName = strings.TrimPrefix(displayName, "terraform-")
+		d := displayMeta{
+			Name:        repo.GetName(),
+			DisplayName: displayName,
+			URL:         repo.GetHTMLURL(),
+			Stars:       strconv.Itoa(repo.GetStargazersCount()),
+			CreatedAt:   repo.GetCreatedAt().Format(renderTimeformat),
+			Description: repo.GetDescription(),
+			Labels:      repo.Topics,
+		}
+
+		// gh topics to categories
+		parsedCategories := []string{}
+		for _, topic := range repo.Topics {
+			p, exists := topicToCategory[topic]
+			if exists {
+				parsedCategories = append(parsedCategories, p)
+			}
+			if topic == e2eLabel {
+				d.IsE2E = true
+			}
+		}
+		if len(parsedCategories) > 0 {
+			sort.Strings(parsedCategories)
+			d.Categories = strings.Join(parsedCategories, ", ")
+		}
+		dm = append(dm, d)
+	}
+	return dm
+}
+
+// docSort sorts displayMeta surfacing e2e blueprints first for documentation.
+func docSort(dm []displayMeta) []displayMeta { + sort.SliceStable(dm, func(i, j int) bool { + if dm[i].IsE2E && dm[j].IsE2E { + return dm[i].DisplayName < dm[j].DisplayName + } + return dm[i].IsE2E + }) + return dm +} + +// renderDocHTML renders html for documentation. +func renderDocHTML(dm []displayMeta) string { + htmlTmpl, err := template.New("htmlDoc").Parse(htmlTemplate) + if err != nil { + return fmt.Sprintf("error parsing template: %v", err) + } + var tpl bytes.Buffer + err = htmlTmpl.Execute(&tpl, docSort(dm)) + if err != nil { + return fmt.Sprintf("error executing template: %v", err) + } + return tpl.String() +} diff --git a/cli/bpcatalog/render_test.go b/cli/bpcatalog/render_test.go new file mode 100644 index 00000000000..1d6d73ac49d --- /dev/null +++ b/cli/bpcatalog/render_test.go @@ -0,0 +1,310 @@ +package bpcatalog + +import ( + "bytes" + "os" + "path" + "strings" + "testing" + "time" + + "github.com/google/go-github/v68/github" + "github.com/stretchr/testify/assert" +) + +const ( + expectedSuffix = ".expected" + updateEnvVar = "UPDATE_EXPECTED" + testDataDir = "../testdata/catalog" +) + +func TestRender(t *testing.T) { + testRepoData := repos{ + { + Name: github.Ptr("terraform-google-bar"), + CreatedAt: &github.Timestamp{Time: time.Date(2021, 1, 3, 4, 3, 0, 0, time.UTC)}, + StargazersCount: github.Ptr(5), + Description: github.Ptr("lorem ipsom"), + }, + { + Name: github.Ptr("terraform-google-foo"), + CreatedAt: &github.Timestamp{Time: time.Date(2022, 11, 3, 4, 3, 0, 0, time.UTC)}, + StargazersCount: github.Ptr(10), + }, + { + Name: github.Ptr("terraform-foo"), + CreatedAt: &github.Timestamp{Time: time.Date(2022, 11, 3, 4, 3, 0, 0, time.UTC)}, + StargazersCount: github.Ptr(10), + Topics: []string{"unrelated", e2eLabel, "containers"}, + }, + } + tests := []struct { + name string + r repos + format renderFormat + verbose bool + wantErr bool + }{ + { + name: "table", + r: testRepoData, + format: renderTable, + }, + { + name: "csv", + r: testRepoData, + 
format: renderCSV, + }, + { + name: "csv-verbose", + r: testRepoData, + format: renderCSV, + verbose: true, + }, + { + name: "html", + r: testRepoData, + format: renderHTML, + }, + { + name: "invalid", + r: testRepoData, + format: "invalid", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got bytes.Buffer + if err := render(tt.r, &got, tt.format, tt.verbose); (err != nil) != tt.wantErr { + t.Errorf("render() error = %v, wantErr %v", err, tt.wantErr) + } + if !tt.wantErr { + expectedPath := path.Join(testDataDir, tt.name+".expected") + updateExpected(t, expectedPath, got.String()) + expected := readFile(t, expectedPath) + assert.Equal(t, expected, got.String()) + } + }) + } +} + +func readFile(t *testing.T, p string) string { + t.Helper() + j, err := os.ReadFile(p) + if err != nil { + t.Fatalf("error reading file %s: %s", p, err) + } + return string(j) +} + +// UpdateExpected updates expected file at fp with data with update env var is set. +func updateExpected(t *testing.T, fp, data string) { + t.Helper() + if strings.ToLower(os.Getenv(updateEnvVar)) != "true" { + return + } + // 0755 allows read/execute for everyone, write for owner + // which is a safe default since this is test data. + // Execute bit is needed to traverse directories. 
+ err := os.MkdirAll(path.Dir(fp), 0755) + if err != nil { + t.Fatalf("error updating result: %v", err) + } + + if _, err := os.Stat(fp); os.IsNotExist(err) { + _, err := os.Create(fp) + if err != nil { + t.Fatalf("error creating %s: %v", fp, err) + } + } + + err = os.WriteFile(fp, []byte(data), 0755) + if err != nil { + t.Fatalf("error updating result: %v", err) + } +} + +func TestDocSort(t *testing.T) { + tests := []struct { + name string + input []displayMeta + want []displayMeta + }{ + { + name: "simple", + input: []displayMeta{ + { + DisplayName: "a", + IsE2E: false, + }, + { + DisplayName: "b", + IsE2E: true, + }, + }, + want: []displayMeta{ + { + DisplayName: "b", + IsE2E: true, + }, + { + DisplayName: "a", + IsE2E: false, + }, + }, + }, + + { + name: "mutiple", + input: []displayMeta{ + { + DisplayName: "d", + IsE2E: true, + }, + { + DisplayName: "b", + IsE2E: false, + }, + { + DisplayName: "c", + IsE2E: false, + }, + { + DisplayName: "a", + IsE2E: true, + }, + }, + want: []displayMeta{ + { + DisplayName: "a", + IsE2E: true, + }, + { + DisplayName: "d", + IsE2E: true, + }, + { + DisplayName: "b", + IsE2E: false, + }, + { + DisplayName: "c", + IsE2E: false, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := docSort(tt.input) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestReposToDisplayMeta(t *testing.T) { + tests := []struct { + name string + input repos + want []displayMeta + }{ + { + name: "simple", + input: repos{ + { + Name: github.Ptr("terraform-google-bar"), + CreatedAt: &github.Timestamp{Time: time.Date(2021, 1, 3, 4, 3, 0, 0, time.UTC)}, + StargazersCount: github.Ptr(5), + Description: github.Ptr("lorem ipsom"), + Topics: []string{"containers"}, + }, + { + Name: github.Ptr("terraform-foo"), + CreatedAt: &github.Timestamp{Time: time.Date(2022, 11, 3, 4, 3, 0, 0, time.UTC)}, + StargazersCount: github.Ptr(10), + Topics: []string{"unrelated", e2eLabel, "containers"}, + }, + { + Name: github.Ptr("foo"), 
+ CreatedAt: &github.Timestamp{Time: time.Date(2022, 11, 3, 4, 3, 0, 0, time.UTC)}, + StargazersCount: github.Ptr(10), + }, + }, + want: []displayMeta{ + { + Name: "terraform-google-bar", + DisplayName: "bar", + Stars: "5", + CreatedAt: "2021-01-03", + Description: "lorem ipsom", + Labels: []string{"containers"}, + URL: "", + Categories: "Containers", + IsE2E: false, + }, + { + Name: "terraform-foo", + DisplayName: "foo", + Stars: "10", + CreatedAt: "2022-11-03", + Description: "", + Labels: []string{"unrelated", e2eLabel, "containers"}, + URL: "", + Categories: "Containers, End-to-end", + IsE2E: true, + }, + { + Name: "foo", + DisplayName: "foo", + Stars: "10", + CreatedAt: "2022-11-03", + Description: "", + Labels: []string(nil), + URL: "", + Categories: "", + IsE2E: false, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := reposToDisplayMeta(tt.input) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestRenderDocHTML(t *testing.T) { + tests := []struct { + name string + input []displayMeta + }{ + { + name: "single-html", + input: []displayMeta{ + { + Name: "terraform-google-bar", + DisplayName: "bar", + Stars: "5", + CreatedAt: "2021-01-03", + Description: "lorem ipsom", + Labels: []string{"containers"}, + URL: "", + Categories: "Containers", + IsE2E: false, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := renderDocHTML(tt.input) + expectedPath := path.Join(testDataDir, tt.name+".expected") + updateExpected(t, expectedPath, got) + expected := readFile(t, expectedPath) + assert.Equal(t, expected, got) + }) + } +} diff --git a/cli/bpcatalog/tf.go b/cli/bpcatalog/tf.go new file mode 100644 index 00000000000..70d6d682090 --- /dev/null +++ b/cli/bpcatalog/tf.go @@ -0,0 +1,67 @@ +package bpcatalog + +import ( + "fmt" + "strings" + + "github.com/google/go-github/v68/github" +) + +// sortOption defines the set of sort options for catalog. 
+type sortOption string + +func (s *sortOption) String() string { + return string(*s) +} + +func (s *sortOption) Empty() bool { + return s.String() == "" +} + +func (s *sortOption) Set(v string) error { + f, err := sortOptionFromString(v) + if err != nil { + return err + } + *s = f + return nil +} + +func sortOptionFromString(s string) (sortOption, error) { + format := sortOption(s) + for _, stage := range sortOptions { + if format == stage { + return format, nil + } + } + return "", fmt.Errorf("one of %+v expected. unknown sort option: %s", sortOptions, s) +} + +func (r *sortOption) Type() string { + return "sortOption" +} + +const ( + sortStars sortOption = "stars" + sortCreated sortOption = "created" + sortName sortOption = "name" +) + +var ( + sortOptions = []sortOption{sortStars, sortCreated, sortName} +) + +// fetchSortedTFRepos returns a slice of repos sorted by sortOpt. +func fetchSortedTFRepos(gh *ghService, sortOpt sortOption) (repos, error) { + repos, err := gh.fetchRepos() + if err != nil { + return nil, fmt.Errorf("error fetching repos: %w", err) + } + repos = repos.filter(func(r *github.Repository) bool { + if r.GetArchived() { + return false + } + return repoAllowList[r.GetName()] || (strings.HasPrefix(r.GetName(), "terraform-google") && !repoIgnoreList[r.GetName()]) + }) + return repos.sort(sortOpt) +} diff --git a/cli/bpcatalog/tf_test.go b/cli/bpcatalog/tf_test.go new file mode 100644 index 00000000000..1bba34d49ce --- /dev/null +++ b/cli/bpcatalog/tf_test.go @@ -0,0 +1,145 @@ +package bpcatalog + +import ( + "testing" + "time" + + "github.com/google/go-github/v68/github" + "github.com/migueleliasweb/go-github-mock/src/mock" + "github.com/stretchr/testify/assert" +) + +func TestFetchSortedTFRepos(t *testing.T) { + mockT := time.Now() + tests := []struct { + name string + repos []github.Repository + sortBy sortOption + want []string + wantErr bool + }{ + { + name: "simple sort created", + repos: []github.Repository{ + { + Name: 
github.Ptr("terraform-google-bar"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 3)}, + }, + { + Name: github.Ptr("terraform-google-foo"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 2)}, + }, + { + Name: github.Ptr("foo"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 2)}, + }, + }, + want: []string{ + "terraform-google-foo", + "terraform-google-bar", + }, + sortBy: sortCreated, + }, + { + name: "simple sort name", + repos: []github.Repository{ + { + Name: github.Ptr("terraform-google-bar"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 3)}, + }, + { + Name: github.Ptr("terraform-google-foo"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 2)}, + }, + { + Name: github.Ptr("foo"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 2)}, + }, + }, + want: []string{ + "terraform-google-bar", + "terraform-google-foo", + }, + sortBy: sortName, + }, + { + name: "simple sort stars", + repos: []github.Repository{ + { + Name: github.Ptr("terraform-google-bar"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 3)}, + StargazersCount: github.Ptr(5), + }, + { + Name: github.Ptr("terraform-google-foo"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 2)}, + StargazersCount: github.Ptr(10), + }, + { + Name: github.Ptr("foo"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 2)}, + StargazersCount: github.Ptr(12), + }, + { + Name: github.Ptr("archived"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 2)}, + Archived: github.Ptr(true), + }, + }, + want: []string{ + "terraform-google-bar", + "terraform-google-foo", + }, + sortBy: sortStars, + }, + { + name: "invalid", + repos: []github.Repository{ + { + Name: github.Ptr("terraform-google-bar"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 3)}, + StargazersCount: github.Ptr(5), + }, + { + Name: github.Ptr("terraform-google-foo"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 2)}, + 
StargazersCount: github.Ptr(10), + }, + { + Name: github.Ptr("foo"), + CreatedAt: &github.Timestamp{Time: mockT.Add(time.Hour * 2)}, + StargazersCount: github.Ptr(12), + }, + }, + wantErr: true, + sortBy: "baz", + }, + { + name: "empty", + repos: []github.Repository{}, + wantErr: false, + sortBy: "name", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockedHTTPClient := mock.NewMockedHTTPClient( + mock.WithRequestMatch( + mock.GetOrgsReposByOrg, + tt.repos, + ), + ) + mockGHService := newGHService(withClient(mockedHTTPClient), withOrgs([]string{"foo"})) + got, err := fetchSortedTFRepos(mockGHService, tt.sortBy) + if (err != nil) != tt.wantErr { + t.Errorf("fetchSortedTFRepos() error = %v, wantErr %v", err, tt.wantErr) + return + } + var gotRepoNames []string + for _, r := range got { + gotRepoNames = append(gotRepoNames, r.GetName()) + } + assert.Equal(t, tt.want, gotRepoNames) + }) + } +} diff --git a/cli/bpmetadata/bpmetadata.pb.go b/cli/bpmetadata/bpmetadata.pb.go new file mode 100644 index 00000000000..b46e93b9296 --- /dev/null +++ b/cli/bpmetadata/bpmetadata.pb.go @@ -0,0 +1,3415 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: bpmetadata.proto + +package bpmetadata + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// QuotaResourceType defines the type of resource a quota is applied to. 
+type QuotaResourceType int32 + +const ( + QuotaResourceType_QRT_UNDEFINED QuotaResourceType = 0 + QuotaResourceType_QRT_RESOURCE_TYPE_GCE_INSTANCE QuotaResourceType = 1 + QuotaResourceType_QRT_RESOURCE_TYPE_GCE_DISK QuotaResourceType = 2 +) + +// Enum value maps for QuotaResourceType. +var ( + QuotaResourceType_name = map[int32]string{ + 0: "QRT_UNDEFINED", + 1: "QRT_RESOURCE_TYPE_GCE_INSTANCE", + 2: "QRT_RESOURCE_TYPE_GCE_DISK", + } + QuotaResourceType_value = map[string]int32{ + "QRT_UNDEFINED": 0, + "QRT_RESOURCE_TYPE_GCE_INSTANCE": 1, + "QRT_RESOURCE_TYPE_GCE_DISK": 2, + } +) + +func (x QuotaResourceType) Enum() *QuotaResourceType { + p := new(QuotaResourceType) + *p = x + return p +} + +func (x QuotaResourceType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (QuotaResourceType) Descriptor() protoreflect.EnumDescriptor { + return file_bpmetadata_proto_enumTypes[0].Descriptor() +} + +func (QuotaResourceType) Type() protoreflect.EnumType { + return &file_bpmetadata_proto_enumTypes[0] +} + +func (x QuotaResourceType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use QuotaResourceType.Descriptor instead. +func (QuotaResourceType) EnumDescriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{0} +} + +// SoftwareGroupType is a string enum representing the different types of software groups. +type SoftwareGroupType int32 + +const ( + // UNSPECIFIED is the default value for SoftwareGroupType. + SoftwareGroupType_SG_UNSPECIFIED SoftwareGroupType = 0 + // OS is a software group that represents an operating system. + SoftwareGroupType_SG_OS SoftwareGroupType = 1 +) + +// Enum value maps for SoftwareGroupType. 
+var ( + SoftwareGroupType_name = map[int32]string{ + 0: "SG_UNSPECIFIED", + 1: "SG_OS", + } + SoftwareGroupType_value = map[string]int32{ + "SG_UNSPECIFIED": 0, + "SG_OS": 1, + } +) + +func (x SoftwareGroupType) Enum() *SoftwareGroupType { + p := new(SoftwareGroupType) + *p = x + return p +} + +func (x SoftwareGroupType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SoftwareGroupType) Descriptor() protoreflect.EnumDescriptor { + return file_bpmetadata_proto_enumTypes[1].Descriptor() +} + +func (SoftwareGroupType) Type() protoreflect.EnumType { + return &file_bpmetadata_proto_enumTypes[1] +} + +func (x SoftwareGroupType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SoftwareGroupType.Descriptor instead. +func (SoftwareGroupType) EnumDescriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{1} +} + +// BlueprintMetadata defines the overall structure for blueprint metadata. +// The cli command i.e. `cft blueprint metadata` attempts at auto-generating +// metadata if the blueprint is structured based on the TF blueprint template +// i.e. 
https://github.com/terraform-google-modules/terraform-google-module-template +// All fields within BlueprintMetadata and its children are denoted as: +// - Gen: auto-generated - +// - Gen: manually-authored +// - Gen: partial (contains nested messages that can include both auto-generated and manually authored) +type BlueprintMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // APIVersion is the apiVersion field of a metadata file + // Gen: auto-generated + ApiVersion string `protobuf:"bytes,1,opt,name=api_version,json=apiVersion,proto3" json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"` // @gotags: json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" + // Kind is the kind field of a metadata file + // Gen: auto-generated + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty" yaml:"kind,omitempty"` // @gotags: json:"kind,omitempty" yaml:"kind,omitempty" + // ResourceTypeMeta is the metadata field of a metadata file + // Gen: partial + Metadata *ResourceTypeMeta `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty" yaml:"metadata,omitempty"` // @gotags: json:"metadata,omitempty" yaml:"metadata,omitempty" + // BlueprintMetadataSpec is the metadata specification for the blueprint + // Gen: partial + Spec *BlueprintMetadataSpec `protobuf:"bytes,4,opt,name=spec,proto3" json:"spec" yaml:"spec"` // @gotags: yaml:"spec" json:"spec" +} + +func (x *BlueprintMetadata) Reset() { + *x = BlueprintMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintMetadata) ProtoMessage() {} + +func (x *BlueprintMetadata) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintMetadata.ProtoReflect.Descriptor instead. +func (*BlueprintMetadata) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{0} +} + +func (x *BlueprintMetadata) GetApiVersion() string { + if x != nil { + return x.ApiVersion + } + return "" +} + +func (x *BlueprintMetadata) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *BlueprintMetadata) GetMetadata() *ResourceTypeMeta { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *BlueprintMetadata) GetSpec() *BlueprintMetadataSpec { + if x != nil { + return x.Spec + } + return nil +} + +type ResourceTypeMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name is the metadata.name field of a Resource + // Gen: auto-generated + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty" yaml:"name,omitempty"` // @gotags: json:"name,omitempty" yaml:"name,omitempty" + // Labels is the metadata.labels field of a Resource + // Gen: manually-authored + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" yaml:"labels,omitempty"` // @gotags: json:"labels,omitempty" yaml:"labels,omitempty" + // Annotations is the metadata.annotations field of a Resource. 
+ // Gen: auto-generated + Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" yaml:"annotations,omitempty"` // @gotags: json:"annotations,omitempty" yaml:"annotations,omitempty" +} + +func (x *ResourceTypeMeta) Reset() { + *x = ResourceTypeMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceTypeMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceTypeMeta) ProtoMessage() {} + +func (x *ResourceTypeMeta) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceTypeMeta.ProtoReflect.Descriptor instead. +func (*ResourceTypeMeta) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceTypeMeta) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ResourceTypeMeta) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *ResourceTypeMeta) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +// BlueprintMetadataSpec defines the spec portion of the blueprint metadata. +type BlueprintMetadataSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // BlueprintInfo defines the basic information of the blueprint. 
+ // Gen: partial + Info *BlueprintInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty" yaml:"info,omitempty"` // @gotags: json:"info,omitempty" yaml:"info,omitempty" + // BlueprintContent defines the detail for blueprint related content such as + // related documentation, diagrams, examples etc. + // Gen: partial + Content *BlueprintContent `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty" yaml:"content,omitempty"` // @gotags: json:"content,omitempty" yaml:"content,omitempty" + // BlueprintInterface defines the input and output variables for the blueprint. + // Gen: partial + Interfaces *BlueprintInterface `protobuf:"bytes,3,opt,name=interfaces,proto3" json:"interfaces,omitempty" yaml:"interfaces,omitempty"` // @gotags: json:"interfaces,omitempty" yaml:"interfaces,omitempty" + // BlueprintRequirements defines the roles required and the associated services + // that need to be enabled to provision blueprint resources. + // Gen: auto-generated + Requirements *BlueprintRequirements `protobuf:"bytes,4,opt,name=requirements,proto3" json:"requirements,omitempty" yaml:"requirements,omitempty"` // @gotags: json:"requirements,omitempty" yaml:"requirements,omitempty" + // BlueprintUI defines the user interface for the blueprint. 
+ // Gen: partial + Ui *BlueprintUI `protobuf:"bytes,5,opt,name=ui,proto3" json:"ui,omitempty" yaml:"ui,omitempty"` // @gotags: json:"ui,omitempty" yaml:"ui,omitempty" +} + +func (x *BlueprintMetadataSpec) Reset() { + *x = BlueprintMetadataSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintMetadataSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintMetadataSpec) ProtoMessage() {} + +func (x *BlueprintMetadataSpec) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintMetadataSpec.ProtoReflect.Descriptor instead. +func (*BlueprintMetadataSpec) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{2} +} + +func (x *BlueprintMetadataSpec) GetInfo() *BlueprintInfo { + if x != nil { + return x.Info + } + return nil +} + +func (x *BlueprintMetadataSpec) GetContent() *BlueprintContent { + if x != nil { + return x.Content + } + return nil +} + +func (x *BlueprintMetadataSpec) GetInterfaces() *BlueprintInterface { + if x != nil { + return x.Interfaces + } + return nil +} + +func (x *BlueprintMetadataSpec) GetRequirements() *BlueprintRequirements { + if x != nil { + return x.Requirements + } + return nil +} + +func (x *BlueprintMetadataSpec) GetUi() *BlueprintUI { + if x != nil { + return x.Ui + } + return nil +} + +// BlueprintInfo defines the basic information of the blueprint. +type BlueprintInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Title for the blueprint. + // Gen: auto-generated - First H1 text in readme.md. 
+ Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title" yaml:"title"` // @gotags: json:"title" yaml:"title" + // Blueprint source location and source type. + // Gen: auto-generated - user will be prompted if repo information can not + // be determined from the blueprint path. + Source *BlueprintRepoDetail `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty" yaml:"source,omitempty"` // @gotags: json:"source,omitempty" yaml:"source,omitempty" + // Last released semantic version for the packaged blueprint. + // Gen: auto-generated - From the `module_name` attribute of + // the `provider_meta "google"` block. + // E.g. + // + // provider_meta "google" { + // module_name = "blueprints/terraform/terraform-google-log-analysis/v0.1.5" + // } + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty" yaml:"version,omitempty"` // @gotags: json:"version,omitempty" yaml:"version,omitempty" + // Actuation tool e.g. Terraform and its required version. + // Gen: auto-generated + ActuationTool *BlueprintActuationTool `protobuf:"bytes,4,opt,name=actuation_tool,json=actuationTool,proto3" json:"actuationTool,omitempty" yaml:"actuationTool,omitempty"` // @gotags: json:"actuationTool,omitempty" yaml:"actuationTool,omitempty" + // Various types of descriptions associated with the blueprint. + // Gen: auto-generated + Description *BlueprintDescription `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty" yaml:"description,omitempty"` // @gotags: json:"description,omitempty" yaml:"description,omitempty" + // Path to an image representing the icon for the blueprint. + // Will be set as "assets/icon.png", if present. + // Gen: auto-generated + Icon string `protobuf:"bytes,6,opt,name=icon,proto3" json:"icon,omitempty" yaml:"icon,omitempty"` // @gotags: json:"icon,omitempty" yaml:"icon,omitempty" + // The time estimate for configuring and deploying the blueprint. 
+ // Gen: auto-generated + DeploymentDuration *BlueprintTimeEstimate `protobuf:"bytes,7,opt,name=deployment_duration,json=deploymentDuration,proto3" json:"deploymentDuration,omitempty" yaml:"deploymentDuration,omitempty"` // @gotags: json:"deploymentDuration,omitempty" yaml:"deploymentDuration,omitempty" + // The cost estimate for the blueprint based on preconfigured variables. + // Gen: auto-generated + CostEstimate *BlueprintCostEstimate `protobuf:"bytes,8,opt,name=cost_estimate,json=costEstimate,proto3" json:"costEstimate,omitempty" yaml:"costEstimate,omitempty"` // @gotags: json:"costEstimate,omitempty" yaml:"costEstimate,omitempty" + // A list of GCP cloud products used in the blueprint. + // Gen: manually-authored + CloudProducts []*BlueprintCloudProduct `protobuf:"bytes,9,rep,name=cloud_products,json=cloudProducts,proto3" json:"cloudProducts,omitempty" yaml:"cloudProducts,omitempty"` // @gotags: json:"cloudProducts,omitempty" yaml:"cloudProducts,omitempty" + // A configuration of fixed and dynamic GCP quotas that apply to the blueprint. + // Gen: manually-authored + QuotaDetails []*BlueprintQuotaDetail `protobuf:"bytes,10,rep,name=quota_details,json=quotaDetails,proto3" json:"quotaDetails,omitempty" yaml:"quotaDetails,omitempty"` // @gotags: json:"quotaDetails,omitempty" yaml:"quotaDetails,omitempty" + // Details on the author producing the blueprint. + // Gen: manually-authored + Author *BlueprintAuthor `protobuf:"bytes,11,opt,name=author,proto3" json:"author,omitempty" yaml:"author,omitempty"` // @gotags: json:"author,omitempty" yaml:"author,omitempty" + // Details on software installed as part of the blueprint. + // Gen: manually-authored + SoftwareGroups []*BlueprintSoftwareGroup `protobuf:"bytes,12,rep,name=software_groups,json=softwareGroups,proto3" json:"softwareGroups,omitempty" yaml:"softwareGroups,omitempty"` // @gotags: json:"softwareGroups,omitempty" yaml:"softwareGroups,omitempty" + // Support offered, if any for the blueprint. 
+ // Gen: manually-authored + SupportInfo *BlueprintSupport `protobuf:"bytes,13,opt,name=support_info,json=supportInfo,proto3" json:"supportInfo,omitempty" yaml:"supportInfo,omitempty"` // @gotags: json:"supportInfo,omitempty" yaml:"supportInfo,omitempty" + // A list of GCP org policies to be checked for successful deployment. + // Gen: manually-authored + OrgPolicyChecks []*BlueprintOrgPolicyCheck `protobuf:"bytes,14,rep,name=org_policy_checks,json=orgPolicyChecks,proto3" json:"orgPolicyChecks,omitempty" yaml:"orgPolicyChecks,omitempty"` // @gotags: json:"orgPolicyChecks,omitempty" yaml:"orgPolicyChecks,omitempty" + // Specifies if the blueprint supports single or multiple deployments per GCP project. + // If set to true, the blueprint can not be deployed more than once in the same GCP project. + // Gen: manually-authored + SingleDeployment bool `protobuf:"varint,15,opt,name=single_deployment,json=singleDeployment,proto3" json:"singleDeployment,omitempty" yaml:"singleDeployment,omitempty"` // @gotags: json:"singleDeployment,omitempty" yaml:"singleDeployment,omitempty" +} + +func (x *BlueprintInfo) Reset() { + *x = BlueprintInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintInfo) ProtoMessage() {} + +func (x *BlueprintInfo) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintInfo.ProtoReflect.Descriptor instead. 
+func (*BlueprintInfo) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{3} +} + +func (x *BlueprintInfo) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *BlueprintInfo) GetSource() *BlueprintRepoDetail { + if x != nil { + return x.Source + } + return nil +} + +func (x *BlueprintInfo) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *BlueprintInfo) GetActuationTool() *BlueprintActuationTool { + if x != nil { + return x.ActuationTool + } + return nil +} + +func (x *BlueprintInfo) GetDescription() *BlueprintDescription { + if x != nil { + return x.Description + } + return nil +} + +func (x *BlueprintInfo) GetIcon() string { + if x != nil { + return x.Icon + } + return "" +} + +func (x *BlueprintInfo) GetDeploymentDuration() *BlueprintTimeEstimate { + if x != nil { + return x.DeploymentDuration + } + return nil +} + +func (x *BlueprintInfo) GetCostEstimate() *BlueprintCostEstimate { + if x != nil { + return x.CostEstimate + } + return nil +} + +func (x *BlueprintInfo) GetCloudProducts() []*BlueprintCloudProduct { + if x != nil { + return x.CloudProducts + } + return nil +} + +func (x *BlueprintInfo) GetQuotaDetails() []*BlueprintQuotaDetail { + if x != nil { + return x.QuotaDetails + } + return nil +} + +func (x *BlueprintInfo) GetAuthor() *BlueprintAuthor { + if x != nil { + return x.Author + } + return nil +} + +func (x *BlueprintInfo) GetSoftwareGroups() []*BlueprintSoftwareGroup { + if x != nil { + return x.SoftwareGroups + } + return nil +} + +func (x *BlueprintInfo) GetSupportInfo() *BlueprintSupport { + if x != nil { + return x.SupportInfo + } + return nil +} + +func (x *BlueprintInfo) GetOrgPolicyChecks() []*BlueprintOrgPolicyCheck { + if x != nil { + return x.OrgPolicyChecks + } + return nil +} + +func (x *BlueprintInfo) GetSingleDeployment() bool { + if x != nil { + return x.SingleDeployment + } + return false +} + +// BlueprintContent defines the detail 
for blueprint related content such as +// related documentation, diagrams, examples etc. +type BlueprintContent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Gen: auto-generated + Architecture *BlueprintArchitecture `protobuf:"bytes,1,opt,name=architecture,proto3" json:"architecture,omitempty" yaml:"architecture,omitempty"` // @gotags: json:"architecture,omitempty" yaml:"architecture,omitempty" + // Gen: manually-authored + Diagrams []*BlueprintDiagram `protobuf:"bytes,2,rep,name=diagrams,proto3" json:"diagrams,omitempty" yaml:"diagrams,omitempty"` // @gotags: json:"diagrams,omitempty" yaml:"diagrams,omitempty" + // Gen: auto-generated - the list content following the "## Documentation" tag. E.g. + // ## Documentation + // - [Hosting a Static Website](https://cloud.google.com/storage/docs/hosting-static-website) + Documentation []*BlueprintListContent `protobuf:"bytes,3,rep,name=documentation,proto3" json:"documentation,omitempty" yaml:"documentation,omitempty"` // @gotags: json:"documentation,omitempty" yaml:"documentation,omitempty" + // Gen: auto-generated - blueprints under the modules/ folder. + SubBlueprints []*BlueprintMiscContent `protobuf:"bytes,4,rep,name=sub_blueprints,json=subBlueprints,proto3" json:"subBlueprints,omitempty" yaml:"subBlueprints,omitempty"` // @gotags: json:"subBlueprints,omitempty" yaml:"subBlueprints,omitempty" + // Gen: auto-generated - examples under the examples/ folder. 
+ Examples []*BlueprintMiscContent `protobuf:"bytes,5,rep,name=examples,proto3" json:"examples,omitempty" yaml:"examples,omitempty"` // @gotags: json:"examples,omitempty" yaml:"examples,omitempty" +} + +func (x *BlueprintContent) Reset() { + *x = BlueprintContent{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintContent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintContent) ProtoMessage() {} + +func (x *BlueprintContent) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintContent.ProtoReflect.Descriptor instead. +func (*BlueprintContent) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{4} +} + +func (x *BlueprintContent) GetArchitecture() *BlueprintArchitecture { + if x != nil { + return x.Architecture + } + return nil +} + +func (x *BlueprintContent) GetDiagrams() []*BlueprintDiagram { + if x != nil { + return x.Diagrams + } + return nil +} + +func (x *BlueprintContent) GetDocumentation() []*BlueprintListContent { + if x != nil { + return x.Documentation + } + return nil +} + +func (x *BlueprintContent) GetSubBlueprints() []*BlueprintMiscContent { + if x != nil { + return x.SubBlueprints + } + return nil +} + +func (x *BlueprintContent) GetExamples() []*BlueprintMiscContent { + if x != nil { + return x.Examples + } + return nil +} + +// BlueprintInterface defines the input and output variables for the blueprint. 
+type BlueprintInterface struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Gen: auto-generated - all defined variables for the blueprint + Variables []*BlueprintVariable `protobuf:"bytes,1,rep,name=variables,proto3" json:"variables,omitempty" yaml:"variables,omitempty"` // @gotags: json:"variables,omitempty" yaml:"variables,omitempty" + // Gen: manually-authored + VariableGroups []*BlueprintVariableGroup `protobuf:"bytes,2,rep,name=variable_groups,json=variableGroups,proto3" json:"variableGroups,omitempty" yaml:"variableGroups,omitempty"` // @gotags: json:"variableGroups,omitempty" yaml:"variableGroups,omitempty" + // Gen: auto-generated - all defined outputs for the blueprint + Outputs []*BlueprintOutput `protobuf:"bytes,3,rep,name=outputs,proto3" json:"outputs,omitempty" yaml:"outputs,omitempty"` // @gotags: json:"outputs,omitempty" yaml:"outputs,omitempty" +} + +func (x *BlueprintInterface) Reset() { + *x = BlueprintInterface{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintInterface) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintInterface) ProtoMessage() {} + +func (x *BlueprintInterface) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintInterface.ProtoReflect.Descriptor instead. 
+func (*BlueprintInterface) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{5} +} + +func (x *BlueprintInterface) GetVariables() []*BlueprintVariable { + if x != nil { + return x.Variables + } + return nil +} + +func (x *BlueprintInterface) GetVariableGroups() []*BlueprintVariableGroup { + if x != nil { + return x.VariableGroups + } + return nil +} + +func (x *BlueprintInterface) GetOutputs() []*BlueprintOutput { + if x != nil { + return x.Outputs + } + return nil +} + +// BlueprintRequirements defines the roles required and the associated services +// that need to be enabled to provision blueprint resources. +type BlueprintRequirements struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Gen: auto-generated - all roles required for the blueprint in test/setup/iam.tf + // as the "int_required_roles" local. E.g. + // + // locals { + // int_required_roles = [ + // "roles/compute.admin", + // ] + // } + Roles []*BlueprintRoles `protobuf:"bytes,1,rep,name=roles,proto3" json:"roles,omitempty" yaml:"roles,omitempty"` // @gotags: json:"roles,omitempty" yaml:"roles,omitempty" + // Gen: auto-generated - all services required for the blueprint in test/setup/main.tf + // as "activate_apis" in the project module. + Services []string `protobuf:"bytes,2,rep,name=services,proto3" json:"services,omitempty" yaml:"services,omitempty"` // @gotags: json:"services,omitempty" yaml:"services,omitempty" + // Required provider versions. + // Gen: auto-generated from required providers block. 
+ ProviderVersions []*ProviderVersion `protobuf:"bytes,3,rep,name=provider_versions,json=providerVersions,proto3" json:"providerVersions,omitempty" yaml:"providerVersions,omitempty"` // @gotags: json:"providerVersions,omitempty" yaml:"providerVersions,omitempty" +} + +func (x *BlueprintRequirements) Reset() { + *x = BlueprintRequirements{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintRequirements) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintRequirements) ProtoMessage() {} + +func (x *BlueprintRequirements) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintRequirements.ProtoReflect.Descriptor instead. +func (*BlueprintRequirements) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{6} +} + +func (x *BlueprintRequirements) GetRoles() []*BlueprintRoles { + if x != nil { + return x.Roles + } + return nil +} + +func (x *BlueprintRequirements) GetServices() []string { + if x != nil { + return x.Services + } + return nil +} + +func (x *BlueprintRequirements) GetProviderVersions() []*ProviderVersion { + if x != nil { + return x.ProviderVersions + } + return nil +} + +// ProviderVersion defines the required version for a provider. +type ProviderVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Provider source of form [hostname]/namespace/name. + // Hostname is optional defaulting to Terraform registry. + // Gen: auto-generated from required providers block. 
+ Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty" yaml:"source,omitempty"` // @gotags: json:"source,omitempty" yaml:"source,omitempty" + // Version constraint string. + // Gen: auto-generated from required providers block. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty" yaml:"version,omitempty"` // @gotags: json:"version,omitempty" yaml:"version,omitempty" +} + +func (x *ProviderVersion) Reset() { + *x = ProviderVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProviderVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProviderVersion) ProtoMessage() {} + +func (x *ProviderVersion) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProviderVersion.ProtoReflect.Descriptor instead. +func (*ProviderVersion) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{7} +} + +func (x *ProviderVersion) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *ProviderVersion) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +// BlueprintUI is the top-level structure for holding UI specific metadata. +type BlueprintUI struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The top-level input section that defines the list of variables and + // their sections on the deployment page. 
+ // Gen: partial + Input *BlueprintUIInput `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty" yaml:"input,omitempty"` // @gotags: json:"input,omitempty" yaml:"input,omitempty" + // The top-level section for listing runtime (or blueprint output) information + // i.e. the console URL for the VM or a button to ssh into the VM etc based on. + // Gen: manually-authored + Runtime *BlueprintUIOutput `protobuf:"bytes,2,opt,name=runtime,proto3" json:"runtime,omitempty" yaml:"runtime,omitempty"` // @gotags: json:"runtime,omitempty" yaml:"runtime,omitempty" +} + +func (x *BlueprintUI) Reset() { + *x = BlueprintUI{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintUI) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintUI) ProtoMessage() {} + +func (x *BlueprintUI) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintUI.ProtoReflect.Descriptor instead. +func (*BlueprintUI) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{8} +} + +func (x *BlueprintUI) GetInput() *BlueprintUIInput { + if x != nil { + return x.Input + } + return nil +} + +func (x *BlueprintUI) GetRuntime() *BlueprintUIOutput { + if x != nil { + return x.Runtime + } + return nil +} + +type BlueprintRepoDetail struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Gen: auto-generated - URL from the .git dir. + // Can be manually overridden with a custom URL if needed. 
+ Repo string `protobuf:"bytes,1,opt,name=repo,proto3" json:"repo" yaml:"repo"` // @gotags: json:"repo" yaml:"repo" + // Gen: auto-generated - set as "git" for now until more + // types are supported. + SourceType string `protobuf:"bytes,2,opt,name=source_type,json=sourceType,proto3" json:"sourceType" yaml:"sourceType"` // @gotags: json:"sourceType" yaml:"sourceType" + // Gen: auto-generated - not set for root modules but + // set as the module name for submodules, if found. + Dir string `protobuf:"bytes,3,opt,name=dir,proto3" json:"dir,omitempty" yaml:"dir,omitempty"` // @gotags: json:"dir,omitempty" yaml:"dir,omitempty" +} + +func (x *BlueprintRepoDetail) Reset() { + *x = BlueprintRepoDetail{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintRepoDetail) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintRepoDetail) ProtoMessage() {} + +func (x *BlueprintRepoDetail) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintRepoDetail.ProtoReflect.Descriptor instead. +func (*BlueprintRepoDetail) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{9} +} + +func (x *BlueprintRepoDetail) GetRepo() string { + if x != nil { + return x.Repo + } + return "" +} + +func (x *BlueprintRepoDetail) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *BlueprintRepoDetail) GetDir() string { + if x != nil { + return x.Dir + } + return "" +} + +// BlueprintActuationTool defines the actuation tool used to provision the blueprint. 
+type BlueprintActuationTool struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Gen: auto-generated - set as "Terraform" for now until + // more flavors are supported. + Flavor string `protobuf:"bytes,1,opt,name=flavor,proto3" json:"flavor,omitempty" yaml:"flavor,omitempty"` // @gotags: json:"flavor,omitempty" yaml:"flavor,omitempty" + // Required version for the actuation tool. + // Gen: auto-generated - For Terraform this is the `required_version` + // set in `terraform` block. E.g. + // + // terraform { + // required_version = ">= 0.13" + // } + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty" yaml:"version,omitempty"` // @gotags: json:"version,omitempty" yaml:"version,omitempty" +} + +func (x *BlueprintActuationTool) Reset() { + *x = BlueprintActuationTool{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintActuationTool) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintActuationTool) ProtoMessage() {} + +func (x *BlueprintActuationTool) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintActuationTool.ProtoReflect.Descriptor instead. 
+func (*BlueprintActuationTool) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{10} +} + +func (x *BlueprintActuationTool) GetFlavor() string { + if x != nil { + return x.Flavor + } + return "" +} + +func (x *BlueprintActuationTool) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +// All descriptions are set with the markdown content immediately +// after each type's heading declaration in readme.md. +type BlueprintDescription struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Gen: auto-generated - Markdown after "### Tagline". + Tagline string `protobuf:"bytes,1,opt,name=tagline,proto3" json:"tagline,omitempty" yaml:"tagline,omitempty"` // @gotags: json:"tagline,omitempty" yaml:"tagline,omitempty" + // Gen: auto-generated - Markdown after "### Detailed". + Detailed string `protobuf:"bytes,2,opt,name=detailed,proto3" json:"detailed,omitempty" yaml:"detailed,omitempty"` // @gotags: json:"detailed,omitempty" yaml:"detailed,omitempty" + // Gen: auto-generated - Markdown after "### PreDeploy". + PreDeploy string `protobuf:"bytes,3,opt,name=pre_deploy,json=preDeploy,proto3" json:"preDeploy,omitempty" yaml:"preDeploy,omitempty"` // @gotags: json:"preDeploy,omitempty" yaml:"preDeploy,omitempty" + // Gen: auto-generated - Markdown after "### Html". + Html string `protobuf:"bytes,4,opt,name=html,proto3" json:"html,omitempty" yaml:"html,omitempty"` // @gotags: json:"html,omitempty" yaml:"html,omitempty" + // Gen: auto-generated - Markdown after "### EulaUrls". + EulaUrls []string `protobuf:"bytes,5,rep,name=eula_urls,json=eulaUrls,proto3" json:"eulaUrls,omitempty" yaml:"eulaUrls,omitempty"` // @gotags: json:"eulaUrls,omitempty" yaml:"eulaUrls,omitempty" + // Gen: auto-generated - Markdown after "### Architecture" + // Deprecated. Use BlueprintContent.Architecture instead. 
+ Architecture []string `protobuf:"bytes,6,rep,name=architecture,proto3" json:"architecture,omitempty" yaml:"architecture,omitempty"` // @gotags: json:"architecture,omitempty" yaml:"architecture,omitempty" +} + +func (x *BlueprintDescription) Reset() { + *x = BlueprintDescription{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintDescription) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintDescription) ProtoMessage() {} + +func (x *BlueprintDescription) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintDescription.ProtoReflect.Descriptor instead. +func (*BlueprintDescription) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{11} +} + +func (x *BlueprintDescription) GetTagline() string { + if x != nil { + return x.Tagline + } + return "" +} + +func (x *BlueprintDescription) GetDetailed() string { + if x != nil { + return x.Detailed + } + return "" +} + +func (x *BlueprintDescription) GetPreDeploy() string { + if x != nil { + return x.PreDeploy + } + return "" +} + +func (x *BlueprintDescription) GetHtml() string { + if x != nil { + return x.Html + } + return "" +} + +func (x *BlueprintDescription) GetEulaUrls() []string { + if x != nil { + return x.EulaUrls + } + return nil +} + +func (x *BlueprintDescription) GetArchitecture() []string { + if x != nil { + return x.Architecture + } + return nil +} + +// A time estimate in secs required for configuring and deploying the blueprint. 
+type BlueprintTimeEstimate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Gen: auto-generated - Set using the content defined under "### DeploymentTime" E.g. + // ### DeploymentTime + // - Configuration: X secs + // - Deployment: Y secs + ConfigurationSecs int64 `protobuf:"varint,1,opt,name=configuration_secs,json=configurationSecs,proto3" json:"configurationSecs,omitempty" yaml:"configurationSecs,omitempty"` // @gotags: json:"configurationSecs,omitempty" yaml:"configurationSecs,omitempty" + DeploymentSecs int64 `protobuf:"varint,2,opt,name=deployment_secs,json=deploymentSecs,proto3" json:"deploymentSecs,omitempty" yaml:"deploymentSecs,omitempty"` // @gotags: json:"deploymentSecs,omitempty" yaml:"deploymentSecs,omitempty" +} + +func (x *BlueprintTimeEstimate) Reset() { + *x = BlueprintTimeEstimate{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintTimeEstimate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintTimeEstimate) ProtoMessage() {} + +func (x *BlueprintTimeEstimate) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintTimeEstimate.ProtoReflect.Descriptor instead. 
+func (*BlueprintTimeEstimate) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{12} +} + +func (x *BlueprintTimeEstimate) GetConfigurationSecs() int64 { + if x != nil { + return x.ConfigurationSecs + } + return 0 +} + +func (x *BlueprintTimeEstimate) GetDeploymentSecs() int64 { + if x != nil { + return x.DeploymentSecs + } + return 0 +} + +// The cost estimate for the blueprint based on pre-configured variables. +type BlueprintCostEstimate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Gen: auto-generated - Set using the content defined under "### Cost" as a link + // with a description E.g. + // ### Cost + // [$20.00](https://cloud.google.com/products/calculator?hl=en_US&_ga=2.1665458.-226505189.1675191136#id=02fb0c45-cc29-4567-8cc6-f72ac9024add) + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description" yaml:"description"` // @gotags: json:"description" yaml:"description" + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url" yaml:"url"` // @gotags: json:"url" yaml:"url" +} + +func (x *BlueprintCostEstimate) Reset() { + *x = BlueprintCostEstimate{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintCostEstimate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintCostEstimate) ProtoMessage() {} + +func (x *BlueprintCostEstimate) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintCostEstimate.ProtoReflect.Descriptor instead. 
+func (*BlueprintCostEstimate) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{13} +} + +func (x *BlueprintCostEstimate) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BlueprintCostEstimate) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +// GCP cloud product(s) used in the blueprint. +type BlueprintCloudProduct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A top-level (e.g. "Compute Engine") or secondary (e.g. "Binary Authorization") + // product used in the blueprint. + // Gen: manually-authored + ProductId string `protobuf:"bytes,1,opt,name=product_id,json=productId,proto3" json:"productId,omitempty" yaml:"productId,omitempty"` // @gotags: json:"productId,omitempty" yaml:"productId,omitempty" + // Url for the product. + // Gen: manually-authored + PageUrl string `protobuf:"bytes,2,opt,name=page_url,json=pageUrl,proto3" json:"pageUrl" yaml:"pageUrl"` // @gotags: json:"pageUrl" yaml:"pageUrl" + // A label string for the product, if it is not an integrated GCP product. + // E.g. "Data Studio" + // Gen: manually-authored + Label string `protobuf:"bytes,3,opt,name=label,proto3" json:"label,omitempty" yaml:"label,omitempty"` // @gotags: json:"label,omitempty" yaml:"label,omitempty" + // Is the product's landing page external to the GCP console e.g. 
+ // lookerstudio.google.com + // Gen: manually-authored + IsExternal bool `protobuf:"varint,4,opt,name=is_external,json=isExternal,proto3" json:"isExternal,omitempty" yaml:"isExternal,omitempty"` // @gotags: json:"isExternal,omitempty" yaml:"isExternal,omitempty" +} + +func (x *BlueprintCloudProduct) Reset() { + *x = BlueprintCloudProduct{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintCloudProduct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintCloudProduct) ProtoMessage() {} + +func (x *BlueprintCloudProduct) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintCloudProduct.ProtoReflect.Descriptor instead. +func (*BlueprintCloudProduct) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{14} +} + +func (x *BlueprintCloudProduct) GetProductId() string { + if x != nil { + return x.ProductId + } + return "" +} + +func (x *BlueprintCloudProduct) GetPageUrl() string { + if x != nil { + return x.PageUrl + } + return "" +} + +func (x *BlueprintCloudProduct) GetLabel() string { + if x != nil { + return x.Label + } + return "" +} + +func (x *BlueprintCloudProduct) GetIsExternal() bool { + if x != nil { + return x.IsExternal + } + return false +} + +// BlueprintOrgPolicyCheck defines GCP org policies to be checked +// for successful deployment +type BlueprintOrgPolicyCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Id for the policy e.g. 
"compute-vmExternalIpAccess" + // Gen: manually-authored + PolicyId string `protobuf:"bytes,1,opt,name=policy_id,json=policyId,proto3" json:"policyId" yaml:"policyId"` // @gotags: json:"policyId" yaml:"policyId" + // If not set, it is assumed any version of this org policy + // prevents successful deployment of this solution. + // Gen: manually-authored + RequiredValues []string `protobuf:"bytes,2,rep,name=required_values,json=requiredValues,proto3" json:"requiredValues,omitempty" yaml:"requiredValues,omitempty"` // @gotags: json:"requiredValues,omitempty" yaml:"requiredValues,omitempty" +} + +func (x *BlueprintOrgPolicyCheck) Reset() { + *x = BlueprintOrgPolicyCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintOrgPolicyCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintOrgPolicyCheck) ProtoMessage() {} + +func (x *BlueprintOrgPolicyCheck) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintOrgPolicyCheck.ProtoReflect.Descriptor instead. +func (*BlueprintOrgPolicyCheck) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{15} +} + +func (x *BlueprintOrgPolicyCheck) GetPolicyId() string { + if x != nil { + return x.PolicyId + } + return "" +} + +func (x *BlueprintOrgPolicyCheck) GetRequiredValues() []string { + if x != nil { + return x.RequiredValues + } + return nil +} + +// BlueprintQuotaDetail defines the quota details for a blueprint. 
+type BlueprintQuotaDetail struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DynamicVariable, if provided, associates the provided input variable + // with the corresponding resource and quota type. In its absence, the quota + // detail is assumed to be fixed. + // Gen: manually-authored + DynamicVariable string `protobuf:"bytes,1,opt,name=dynamic_variable,json=dynamicVariable,proto3" json:"dynamicVariable,omitempty" yaml:"dynamicVariable,omitempty"` // @gotags: json:"dynamicVariable,omitempty" yaml:"dynamicVariable,omitempty" + // ResourceType is the type of resource the quota will be applied to i.e. + // GCE Instance or Disk etc. + // Gen: manually-authored + ResourceType QuotaResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.cloud.config.bpmetadata.QuotaResourceType" json:"resourceType" yaml:"resourceType"` // @gotags: json:"resourceType" yaml:"resourceType" + // QuotaType is a key/value pair of the actual quotas and their corresponding + // values. Valid keys for quota_type can be: + // MACHINE_TYPE, + // CPUs, + // DISK_TYPE OR + // SIZE_GB. 
+ // Gen: manually-authored + QuotaType map[string]string `protobuf:"bytes,3,rep,name=quota_type,json=quotaType,proto3" json:"quotaType" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" yaml:"quotaType"` // @gotags: json:"quotaType" yaml:"quotaType" +} + +func (x *BlueprintQuotaDetail) Reset() { + *x = BlueprintQuotaDetail{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintQuotaDetail) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintQuotaDetail) ProtoMessage() {} + +func (x *BlueprintQuotaDetail) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintQuotaDetail.ProtoReflect.Descriptor instead. +func (*BlueprintQuotaDetail) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{16} +} + +func (x *BlueprintQuotaDetail) GetDynamicVariable() string { + if x != nil { + return x.DynamicVariable + } + return "" +} + +func (x *BlueprintQuotaDetail) GetResourceType() QuotaResourceType { + if x != nil { + return x.ResourceType + } + return QuotaResourceType_QRT_UNDEFINED +} + +func (x *BlueprintQuotaDetail) GetQuotaType() map[string]string { + if x != nil { + return x.QuotaType + } + return nil +} + +// BlueprintAuthor defines the author of a blueprint. +type BlueprintAuthor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of template author or organization. 
+ // Gen: manually-authored + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title" yaml:"title"` // @gotags: json:"title" yaml:"title" + // Description of the author. + // Gen: manually-authored + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty" yaml:"description,omitempty"` // @gotags: json:"description,omitempty" yaml:"description,omitempty" + // Link to the author's website. + // Gen: manually-authored + Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty" yaml:"url,omitempty"` // @gotags: json:"url,omitempty" yaml:"url,omitempty" +} + +func (x *BlueprintAuthor) Reset() { + *x = BlueprintAuthor{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintAuthor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintAuthor) ProtoMessage() {} + +func (x *BlueprintAuthor) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintAuthor.ProtoReflect.Descriptor instead. +func (*BlueprintAuthor) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{17} +} + +func (x *BlueprintAuthor) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *BlueprintAuthor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BlueprintAuthor) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +// A group of related software components for the blueprint. 
+type BlueprintSoftwareGroup struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Pre-defined software types. + // Gen: manually-authored + Type SoftwareGroupType `protobuf:"varint,1,opt,name=type,proto3,enum=google.cloud.config.bpmetadata.SoftwareGroupType" json:"type,omitempty" yaml:"type,omitempty"` // @gotags: json:"type,omitempty" yaml:"type,omitempty" + // Software components belonging to this group. + // Gen: manually-authored + Software []*BlueprintSoftware `protobuf:"bytes,2,rep,name=software,proto3" json:"software,omitempty" yaml:"software,omitempty"` // @gotags: json:"software,omitempty" yaml:"software,omitempty" +} + +func (x *BlueprintSoftwareGroup) Reset() { + *x = BlueprintSoftwareGroup{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintSoftwareGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintSoftwareGroup) ProtoMessage() {} + +func (x *BlueprintSoftwareGroup) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintSoftwareGroup.ProtoReflect.Descriptor instead. +func (*BlueprintSoftwareGroup) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{18} +} + +func (x *BlueprintSoftwareGroup) GetType() SoftwareGroupType { + if x != nil { + return x.Type + } + return SoftwareGroupType_SG_UNSPECIFIED +} + +func (x *BlueprintSoftwareGroup) GetSoftware() []*BlueprintSoftware { + if x != nil { + return x.Software + } + return nil +} + +// A description of a piece of a single software component +// installed by the blueprint. 
+type BlueprintSoftware struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // User-visible title. + // Gen: manually-authored + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title" yaml:"title"` // @gotags: json:"title" yaml:"title" + // Software version. + // Gen: manually-authored + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty" yaml:"version,omitempty"` // @gotags: json:"version,omitempty" yaml:"version,omitempty" + // Link to development site or marketing page for this software. + // Gen: manually-authored + Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty" yaml:"url,omitempty"` // @gotags: json:"url,omitempty" yaml:"url,omitempty" + // Link to license page. + // Gen: manually-authored + LicenseUrl string `protobuf:"bytes,4,opt,name=license_url,json=licenseUrl,proto3" json:"licenseUrl,omitempty" yaml:"licenseUrl,omitempty"` // @gotags: json:"licenseUrl,omitempty" yaml:"licenseUrl,omitempty" +} + +func (x *BlueprintSoftware) Reset() { + *x = BlueprintSoftware{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintSoftware) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintSoftware) ProtoMessage() {} + +func (x *BlueprintSoftware) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintSoftware.ProtoReflect.Descriptor instead. 
+func (*BlueprintSoftware) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{19} +} + +func (x *BlueprintSoftware) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *BlueprintSoftware) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *BlueprintSoftware) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *BlueprintSoftware) GetLicenseUrl() string { + if x != nil { + return x.LicenseUrl + } + return "" +} + +// A description of a support option +type BlueprintSupport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Description of the support option. + // Gen: manually-authored + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description" yaml:"description"` // @gotags: json:"description" yaml:"description" + // Link to the page providing this support option. + // Gen: manually-authored + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty" yaml:"url,omitempty"` // @gotags: json:"url,omitempty" yaml:"url,omitempty" + // The organization or group that provides the support option (e.g.: + // "Community", "Google"). + // Gen: manually-authored + Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty" yaml:"entity,omitempty"` // @gotags: json:"entity,omitempty" yaml:"entity,omitempty" + // Whether to show the customer's support ID. 
+ // Gen: manually-authored + ShowSupportId bool `protobuf:"varint,4,opt,name=show_support_id,json=showSupportId,proto3" json:"showSupportId,omitempty" yaml:"showSupportId,omitempty"` // @gotags: json:"showSupportId,omitempty" yaml:"showSupportId,omitempty" +} + +func (x *BlueprintSupport) Reset() { + *x = BlueprintSupport{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintSupport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintSupport) ProtoMessage() {} + +func (x *BlueprintSupport) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintSupport.ProtoReflect.Descriptor instead. +func (*BlueprintSupport) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{20} +} + +func (x *BlueprintSupport) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BlueprintSupport) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *BlueprintSupport) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *BlueprintSupport) GetShowSupportId() bool { + if x != nil { + return x.ShowSupportId + } + return false +} + +type BlueprintArchitecture struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Gen: auto-generated - the URL & list content following the "## Architecture" tag e.g. + // ## Architecture + // ![Blueprint Architecture](assets/architecture.png) + // 1. Step no. 1 + // 2. Step no. 2 + // 3. Step no. 
3 + DiagramUrl string `protobuf:"bytes,1,opt,name=diagram_url,json=diagramUrl,proto3" json:"diagramUrl" yaml:"diagramUrl"` // @gotags: json:"diagramUrl" yaml:"diagramUrl" + // Gen: auto-generated - the list items following the "## Architecture" tag. + Description []string `protobuf:"bytes,2,rep,name=description,proto3" json:"description" yaml:"description"` // @gotags: json:"description" yaml:"description" +} + +func (x *BlueprintArchitecture) Reset() { + *x = BlueprintArchitecture{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintArchitecture) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintArchitecture) ProtoMessage() {} + +func (x *BlueprintArchitecture) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintArchitecture.ProtoReflect.Descriptor instead. 
+func (*BlueprintArchitecture) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{21} +} + +func (x *BlueprintArchitecture) GetDiagramUrl() string { + if x != nil { + return x.DiagramUrl + } + return "" +} + +func (x *BlueprintArchitecture) GetDescription() []string { + if x != nil { + return x.Description + } + return nil +} + +type BlueprintMiscContent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` // @gotags: json:"name" yaml:"name" + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty" yaml:"location,omitempty"` // @gotags: json:"location,omitempty" yaml:"location,omitempty" +} + +func (x *BlueprintMiscContent) Reset() { + *x = BlueprintMiscContent{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintMiscContent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintMiscContent) ProtoMessage() {} + +func (x *BlueprintMiscContent) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintMiscContent.ProtoReflect.Descriptor instead. 
+func (*BlueprintMiscContent) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{22} +} + +func (x *BlueprintMiscContent) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *BlueprintMiscContent) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +type BlueprintDiagram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` // @gotags: json:"name" yaml:"name" + AltText string `protobuf:"bytes,2,opt,name=alt_text,json=altText,proto3" json:"altText,omitempty" yaml:"altText,omitempty"` // @gotags: json:"altText,omitempty" yaml:"altText,omitempty" + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty" yaml:"description,omitempty"` // @gotags: json:"description,omitempty" yaml:"description,omitempty" +} + +func (x *BlueprintDiagram) Reset() { + *x = BlueprintDiagram{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintDiagram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintDiagram) ProtoMessage() {} + +func (x *BlueprintDiagram) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintDiagram.ProtoReflect.Descriptor instead. 
+func (*BlueprintDiagram) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{23} +} + +func (x *BlueprintDiagram) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *BlueprintDiagram) GetAltText() string { + if x != nil { + return x.AltText + } + return "" +} + +func (x *BlueprintDiagram) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +type BlueprintListContent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title" yaml:"title"` // @gotags: json:"title" yaml:"title" + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty" yaml:"url,omitempty"` // @gotags: json:"url,omitempty" yaml:"url,omitempty" +} + +func (x *BlueprintListContent) Reset() { + *x = BlueprintListContent{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintListContent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintListContent) ProtoMessage() {} + +func (x *BlueprintListContent) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintListContent.ProtoReflect.Descriptor instead. 
+func (*BlueprintListContent) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{24} +} + +func (x *BlueprintListContent) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *BlueprintListContent) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +type BlueprintVariable struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty" yaml:"name,omitempty"` // @gotags: json:"name,omitempty" yaml:"name,omitempty" + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty" yaml:"description,omitempty"` // @gotags: json:"description,omitempty" yaml:"description,omitempty" + VarType string `protobuf:"bytes,3,opt,name=var_type,json=varType,proto3" json:"varType,omitempty" yaml:"varType,omitempty"` // @gotags: json:"varType,omitempty" yaml:"varType,omitempty" + DefaultValue *structpb.Value `protobuf:"bytes,4,opt,name=default_value,json=defaultValue,proto3" json:"defaultValue,omitempty" yaml:"defaultValue,omitempty"` // @gotags: json:"defaultValue,omitempty" yaml:"defaultValue,omitempty" + Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty" yaml:"required,omitempty"` // @gotags: json:"required,omitempty" yaml:"required,omitempty" + // Incoming connections to this variable. + // Connections are outputs from other blueprints that can be potentially + // connected to this variable. + // Gen: manually-authored. 
+ Connections []*BlueprintConnection `protobuf:"bytes,6,rep,name=connections,proto3" json:"connections,omitempty" yaml:"connections,omitempty"` // @gotags: json:"connections,omitempty" yaml:"connections,omitempty" +} + +func (x *BlueprintVariable) Reset() { + *x = BlueprintVariable{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintVariable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintVariable) ProtoMessage() {} + +func (x *BlueprintVariable) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintVariable.ProtoReflect.Descriptor instead. +func (*BlueprintVariable) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{25} +} + +func (x *BlueprintVariable) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *BlueprintVariable) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BlueprintVariable) GetVarType() string { + if x != nil { + return x.VarType + } + return "" +} + +func (x *BlueprintVariable) GetDefaultValue() *structpb.Value { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *BlueprintVariable) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *BlueprintVariable) GetConnections() []*BlueprintConnection { + if x != nil { + return x.Connections + } + return nil +} + +// Defines an incoming connection from a blueprint. 
+type BlueprintConnection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Source of the connection. + // Gen: manually-authored. + Source *ConnectionSource `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty" yaml:"source,omitempty"` // @gotags: json:"source,omitempty" yaml:"source,omitempty" + // Connection specifications. + // Gen: manually-authored. + Spec *ConnectionSpec `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty" yaml:"spec,omitempty"` // @gotags: json:"spec,omitempty" yaml:"spec,omitempty" +} + +func (x *BlueprintConnection) Reset() { + *x = BlueprintConnection{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintConnection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintConnection) ProtoMessage() {} + +func (x *BlueprintConnection) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintConnection.ProtoReflect.Descriptor instead. +func (*BlueprintConnection) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{26} +} + +func (x *BlueprintConnection) GetSource() *ConnectionSource { + if x != nil { + return x.Source + } + return nil +} + +func (x *BlueprintConnection) GetSpec() *ConnectionSpec { + if x != nil { + return x.Spec + } + return nil +} + +// Defines the source of a connection. +type ConnectionSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Source of the connection. 
Defined using the same format as module source + // of form [hostname]/namespace/name/provider for registry references and + // unprefixed github.com URLs for github references. + // Gen: manually-authored. + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty" yaml:"source,omitempty"` // @gotags: json:"source,omitempty" yaml:"source,omitempty" + // Version constraint syntax using the same format as module version + // constraints. + // Gen: manually-authored. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty" yaml:"version,omitempty"` // @gotags: json:"version,omitempty" yaml:"version,omitempty" +} + +func (x *ConnectionSource) Reset() { + *x = ConnectionSource{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectionSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectionSource) ProtoMessage() {} + +func (x *ConnectionSource) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectionSource.ProtoReflect.Descriptor instead. +func (*ConnectionSource) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{27} +} + +func (x *ConnectionSource) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *ConnectionSource) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +// Defines the specifications of a connection. 
+type ConnectionSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output expression identifying output being connected to variable. + // This can be the output name or more complex expression like attribuite notation. + // Gen: manually-authored. + OutputExpr string `protobuf:"bytes,1,opt,name=output_expr,json=outputExpr,proto3" json:"outputExpr,omitempty" yaml:"outputExpr,omitempty"` // @gotags: json:"outputExpr,omitempty" yaml:"outputExpr,omitempty" + // Optional dot separated attribuite notation to connect to a specific object field of the input variable. + // Gen: manually-authored. + InputPath *string `protobuf:"bytes,2,opt,name=input_path,json=inputPath,proto3,oneof" json:"inputPath,omitempty" yaml:"inputPath,omitempty"` // @gotags: json:"inputPath,omitempty" yaml:"inputPath,omitempty" +} + +func (x *ConnectionSpec) Reset() { + *x = ConnectionSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectionSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectionSpec) ProtoMessage() {} + +func (x *ConnectionSpec) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectionSpec.ProtoReflect.Descriptor instead. 
+func (*ConnectionSpec) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{28} +} + +func (x *ConnectionSpec) GetOutputExpr() string { + if x != nil { + return x.OutputExpr + } + return "" +} + +func (x *ConnectionSpec) GetInputPath() string { + if x != nil && x.InputPath != nil { + return *x.InputPath + } + return "" +} + +// BlueprintVariableGroup is manually entered. +type BlueprintVariableGroup struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` // @gotags: json:"name" yaml:"name" + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty" yaml:"description,omitempty"` // @gotags: json:"description,omitempty" yaml:"description,omitempty" + Variables []string `protobuf:"bytes,3,rep,name=variables,proto3" json:"variables,omitempty" yaml:"variables,omitempty"` // @gotags: json:"variables,omitempty" yaml:"variables,omitempty" +} + +func (x *BlueprintVariableGroup) Reset() { + *x = BlueprintVariableGroup{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintVariableGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintVariableGroup) ProtoMessage() {} + +func (x *BlueprintVariableGroup) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintVariableGroup.ProtoReflect.Descriptor instead. 
+func (*BlueprintVariableGroup) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{29} +} + +func (x *BlueprintVariableGroup) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *BlueprintVariableGroup) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BlueprintVariableGroup) GetVariables() []string { + if x != nil { + return x.Variables + } + return nil +} + +type BlueprintOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` // @gotags: json:"name" yaml:"name" + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty" yaml:"description,omitempty"` // @gotags: json:"description,omitempty" yaml:"description,omitempty" + // Serialized type representation of the output value. + // Gen: manually-authored but will be automated in the future. + Type *structpb.Value `protobuf:"bytes,3,opt,name=type,proto3,oneof" json:"type,omitempty" yaml:"type,omitempty"` // @gotags: json:"type,omitempty" yaml:"type,omitempty" +} + +func (x *BlueprintOutput) Reset() { + *x = BlueprintOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintOutput) ProtoMessage() {} + +func (x *BlueprintOutput) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintOutput.ProtoReflect.Descriptor instead. 
+func (*BlueprintOutput) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{30} +} + +func (x *BlueprintOutput) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *BlueprintOutput) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BlueprintOutput) GetType() *structpb.Value { + if x != nil { + return x.Type + } + return nil +} + +type BlueprintRoles struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Level string `protobuf:"bytes,1,opt,name=level,proto3" json:"level" yaml:"level"` // @gotags: json:"level" yaml:"level" + Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles" yaml:"roles"` // @gotags: json:"roles" yaml:"roles" +} + +func (x *BlueprintRoles) Reset() { + *x = BlueprintRoles{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintRoles) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintRoles) ProtoMessage() {} + +func (x *BlueprintRoles) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintRoles.ProtoReflect.Descriptor instead. 
+func (*BlueprintRoles) Descriptor() ([]byte, []int) { + return file_bpmetadata_proto_rawDescGZIP(), []int{31} +} + +func (x *BlueprintRoles) GetLevel() string { + if x != nil { + return x.Level + } + return "" +} + +func (x *BlueprintRoles) GetRoles() []string { + if x != nil { + return x.Roles + } + return nil +} + +var File_bpmetadata_proto protoreflect.FileDescriptor + +var file_bpmetadata_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x13, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x75, 0x69, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe1, 0x01, 0x0a, 0x11, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x61, + 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x12, 0x4c, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x49, + 0x0a, 
0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, + 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x53, + 0x70, 0x65, 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x22, 0xdc, 0x02, 0x0a, 0x10, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x2e, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, + 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x92, 0x03, 0x0a, 0x15, 0x42, 0x6c, 0x75, + 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x53, 0x70, + 0x65, 0x63, 0x12, 0x41, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x4a, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x12, 0x52, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 
0x69, 0x6e, 0x74, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x66, 0x61, 0x63, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, + 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x3b, 0x0a, 0x02, 0x75, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, + 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x55, 0x49, 0x52, 0x02, 0x75, 0x69, 0x22, 0xe5, 0x08, + 0x0a, 0x0d, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x52, 0x65, 0x70, 0x6f, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x0e, + 0x61, 0x63, 0x74, 
0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6f, 0x6c, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x41, + 0x63, 0x74, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x63, + 0x74, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6f, 0x6c, 0x12, 0x56, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x63, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x69, 0x63, 0x6f, 0x6e, 0x12, 0x66, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x6c, 0x6f, + 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x54, + 0x69, 0x6d, 0x65, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x52, 0x12, 0x64, 0x65, 0x70, + 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x5a, 0x0a, 0x0d, 0x63, 0x6f, 0x73, 0x74, 0x5f, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, + 0x6f, 0x73, 0x74, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x12, 0x5c, 0x0a, 0x0e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x52, 0x0d, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x73, 0x12, 0x59, 0x0a, 0x0d, 0x71, 0x75, 0x6f, + 0x74, 0x61, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x51, 0x75, 0x6f, 0x74, 0x61, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x0c, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x52, 0x06, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x12, 0x5f, 
0x0a, + 0x0f, 0x73, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x53, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x0e, + 0x73, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x53, + 0x0a, 0x0c, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0b, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x63, 0x0a, 0x11, 0x6f, 0x72, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4f, 0x72, 0x67, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x0f, 0x6f, 0x72, 0x67, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x69, 0x6e, 0x67, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x44, 0x65, 0x70, 0x6c, 0x6f, + 0x79, 0x6d, 0x65, 0x6e, 0x74, 
0x22, 0xc6, 0x03, 0x0a, 0x10, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x59, 0x0a, 0x0c, 0x61, 0x72, + 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x41, 0x72, 0x63, 0x68, 0x69, + 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, + 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x4c, 0x0a, 0x08, 0x64, 0x69, 0x61, 0x67, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, + 0x6e, 0x74, 0x44, 0x69, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x08, 0x64, 0x69, 0x61, 0x67, 0x72, + 0x61, 0x6d, 0x73, 0x12, 0x5a, 0x0a, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x52, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x5b, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x5f, 0x62, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, 0x62, 0x70, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, + 0x6e, 0x74, 0x4d, 0x69, 0x73, 0x63, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x73, + 0x75, 0x62, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x08, + 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4d, 0x69, 0x73, 0x63, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x22, 0x91, + 0x02, 0x0a, 0x12, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x09, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x76, 0x61, 0x72, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x0f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x0e, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 
0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x49, 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x73, 0x22, 0xd7, 0x01, 0x0a, 0x15, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x44, 0x0a, 0x05, + 0x72, 0x6f, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, + 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x52, 0x05, 0x72, 0x6f, 0x6c, + 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x5c, + 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x43, 0x0a, 0x0f, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 
0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x0b, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x55, + 0x49, 0x12, 0x46, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x55, 0x49, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x4b, 0x0a, 0x07, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x55, 0x49, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x07, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x5c, 0x0a, 0x13, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x12, 0x0a, + 0x04, 0x72, 0x65, 0x70, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x65, 0x70, + 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x64, 0x69, 0x72, 0x22, 0x4a, 0x0a, 0x16, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x41, 0x63, 0x74, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6f, 
0x6c, 0x12, 0x16, + 0x0a, 0x06, 0x66, 0x6c, 0x61, 0x76, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x66, 0x6c, 0x61, 0x76, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0xc0, 0x01, 0x0a, 0x14, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x67, + 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x67, 0x6c, + 0x69, 0x6e, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x65, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x12, 0x12, + 0x0a, 0x04, 0x68, 0x74, 0x6d, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x74, + 0x6d, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x75, 0x6c, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x65, 0x75, 0x6c, 0x61, 0x55, 0x72, 0x6c, 0x73, 0x12, + 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, + 0x75, 0x72, 0x65, 0x22, 0x6f, 0x0a, 0x15, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x12, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, + 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x64, + 0x65, 0x70, 0x6c, 
0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, + 0x53, 0x65, 0x63, 0x73, 0x22, 0x4b, 0x0a, 0x15, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x22, 0x88, 0x01, 0x0a, 0x15, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, + 0x67, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x69, + 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x22, 0x5f, 0x0a, 0x17, + 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4f, 0x72, 0x67, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 
0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xbb, 0x02, + 0x0a, 0x14, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x51, 0x75, 0x6f, 0x74, 0x61, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x62, 0x0a, 0x0a, 0x71, 0x75, 0x6f, + 0x74, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, + 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x09, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x3c, 0x0a, + 0x0e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5b, 0x0a, 0x0f, 0x42, + 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xae, 0x01, 0x0a, 0x16, 0x42, 0x6c, 0x75, + 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x53, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x45, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x4d, 0x0a, 0x08, 0x73, 0x6f, + 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, + 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x53, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x52, + 0x08, 0x73, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x22, 0x76, 0x0a, 0x11, 0x42, 0x6c, 0x75, + 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x53, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 
0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x55, 0x72, + 0x6c, 0x22, 0x86, 0x01, 0x0a, 0x10, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x68, 0x6f, + 0x77, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x64, 0x22, 0x5a, 0x0a, 0x15, 0x42, 0x6c, + 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x41, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, + 0x75, 0x72, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x69, 0x61, 0x67, 0x72, 0x61, + 0x6d, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x46, 0x0a, 0x14, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x4d, 0x69, 0x73, 0x63, 0x43, 0x6f, 0x6e, 0x74, 
0x65, 0x6e, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x63, + 0x0a, 0x10, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x44, 0x69, 0x61, 0x67, 0x72, + 0x61, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x6c, 0x74, 0x5f, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x6c, 0x74, 0x54, 0x65, 0x78, + 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x3e, 0x0a, 0x14, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x75, 0x72, 0x6c, 0x22, 0x94, 0x02, 0x0a, 0x11, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x19, 0x0a, 0x08, 0x76, 0x61, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x76, 0x61, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x0d, 0x64, 0x65, + 0x66, 
0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x64, 0x12, 0x55, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa3, 0x01, 0x0a, 0x13, 0x42, + 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x42, 0x0a, 0x04, + 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 
0x70, 0x65, 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, + 0x22, 0x44, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x70, 0x65, 0x63, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22, 0x0a, 0x0a, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x50, 0x61, 0x74, 0x68, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, + 0x0b, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x22, 0x6c, 0x0a, 0x16, + 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, + 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x09, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x0f, 0x42, + 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3c, + 0x0a, 0x0e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x52, 0x6f, 0x6c, 0x65, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x6c, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x72, 0x6f, 0x6c, 0x65, 0x73, 0x2a, 0x6a, 0x0a, 0x11, + 0x51, 0x75, 0x6f, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x51, 0x52, 0x54, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x22, 0x0a, 0x1e, 0x51, 0x52, 0x54, 0x5f, 0x52, 0x45, 0x53, 0x4f, + 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x49, 0x4e, + 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x51, 0x52, 0x54, 0x5f, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x43, + 0x45, 0x5f, 0x44, 0x49, 0x53, 0x4b, 0x10, 0x02, 0x2a, 0x32, 0x0a, 0x11, 0x53, 0x6f, 0x66, 0x74, + 0x77, 0x61, 0x72, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, + 0x0e, 0x53, 0x47, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x47, 0x5f, 0x4f, 0x53, 0x10, 0x01, 0x42, 0x48, 0x5a, 0x46, + 0x67, 0x69, 0x74, 
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2d, + 0x74, 0x6f, 0x6f, 0x6c, 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x6c, 0x69, 0x2f, 0x62, 0x70, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_bpmetadata_proto_rawDescOnce sync.Once + file_bpmetadata_proto_rawDescData = file_bpmetadata_proto_rawDesc +) + +func file_bpmetadata_proto_rawDescGZIP() []byte { + file_bpmetadata_proto_rawDescOnce.Do(func() { + file_bpmetadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_bpmetadata_proto_rawDescData) + }) + return file_bpmetadata_proto_rawDescData +} + +var file_bpmetadata_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_bpmetadata_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_bpmetadata_proto_goTypes = []interface{}{ + (QuotaResourceType)(0), // 0: google.cloud.config.bpmetadata.QuotaResourceType + (SoftwareGroupType)(0), // 1: google.cloud.config.bpmetadata.SoftwareGroupType + (*BlueprintMetadata)(nil), // 2: google.cloud.config.bpmetadata.BlueprintMetadata + (*ResourceTypeMeta)(nil), // 3: google.cloud.config.bpmetadata.ResourceTypeMeta + (*BlueprintMetadataSpec)(nil), // 4: google.cloud.config.bpmetadata.BlueprintMetadataSpec + (*BlueprintInfo)(nil), // 5: google.cloud.config.bpmetadata.BlueprintInfo + (*BlueprintContent)(nil), // 6: google.cloud.config.bpmetadata.BlueprintContent + (*BlueprintInterface)(nil), // 7: google.cloud.config.bpmetadata.BlueprintInterface + (*BlueprintRequirements)(nil), // 8: google.cloud.config.bpmetadata.BlueprintRequirements + (*ProviderVersion)(nil), // 9: google.cloud.config.bpmetadata.ProviderVersion + (*BlueprintUI)(nil), // 10: google.cloud.config.bpmetadata.BlueprintUI + (*BlueprintRepoDetail)(nil), // 11: 
google.cloud.config.bpmetadata.BlueprintRepoDetail + (*BlueprintActuationTool)(nil), // 12: google.cloud.config.bpmetadata.BlueprintActuationTool + (*BlueprintDescription)(nil), // 13: google.cloud.config.bpmetadata.BlueprintDescription + (*BlueprintTimeEstimate)(nil), // 14: google.cloud.config.bpmetadata.BlueprintTimeEstimate + (*BlueprintCostEstimate)(nil), // 15: google.cloud.config.bpmetadata.BlueprintCostEstimate + (*BlueprintCloudProduct)(nil), // 16: google.cloud.config.bpmetadata.BlueprintCloudProduct + (*BlueprintOrgPolicyCheck)(nil), // 17: google.cloud.config.bpmetadata.BlueprintOrgPolicyCheck + (*BlueprintQuotaDetail)(nil), // 18: google.cloud.config.bpmetadata.BlueprintQuotaDetail + (*BlueprintAuthor)(nil), // 19: google.cloud.config.bpmetadata.BlueprintAuthor + (*BlueprintSoftwareGroup)(nil), // 20: google.cloud.config.bpmetadata.BlueprintSoftwareGroup + (*BlueprintSoftware)(nil), // 21: google.cloud.config.bpmetadata.BlueprintSoftware + (*BlueprintSupport)(nil), // 22: google.cloud.config.bpmetadata.BlueprintSupport + (*BlueprintArchitecture)(nil), // 23: google.cloud.config.bpmetadata.BlueprintArchitecture + (*BlueprintMiscContent)(nil), // 24: google.cloud.config.bpmetadata.BlueprintMiscContent + (*BlueprintDiagram)(nil), // 25: google.cloud.config.bpmetadata.BlueprintDiagram + (*BlueprintListContent)(nil), // 26: google.cloud.config.bpmetadata.BlueprintListContent + (*BlueprintVariable)(nil), // 27: google.cloud.config.bpmetadata.BlueprintVariable + (*BlueprintConnection)(nil), // 28: google.cloud.config.bpmetadata.BlueprintConnection + (*ConnectionSource)(nil), // 29: google.cloud.config.bpmetadata.ConnectionSource + (*ConnectionSpec)(nil), // 30: google.cloud.config.bpmetadata.ConnectionSpec + (*BlueprintVariableGroup)(nil), // 31: google.cloud.config.bpmetadata.BlueprintVariableGroup + (*BlueprintOutput)(nil), // 32: google.cloud.config.bpmetadata.BlueprintOutput + (*BlueprintRoles)(nil), // 33: google.cloud.config.bpmetadata.BlueprintRoles + 
nil, // 34: google.cloud.config.bpmetadata.ResourceTypeMeta.LabelsEntry + nil, // 35: google.cloud.config.bpmetadata.ResourceTypeMeta.AnnotationsEntry + nil, // 36: google.cloud.config.bpmetadata.BlueprintQuotaDetail.QuotaTypeEntry + (*BlueprintUIInput)(nil), // 37: google.cloud.config.bpmetadata.BlueprintUIInput + (*BlueprintUIOutput)(nil), // 38: google.cloud.config.bpmetadata.BlueprintUIOutput + (*structpb.Value)(nil), // 39: google.protobuf.Value +} +var file_bpmetadata_proto_depIdxs = []int32{ + 3, // 0: google.cloud.config.bpmetadata.BlueprintMetadata.metadata:type_name -> google.cloud.config.bpmetadata.ResourceTypeMeta + 4, // 1: google.cloud.config.bpmetadata.BlueprintMetadata.spec:type_name -> google.cloud.config.bpmetadata.BlueprintMetadataSpec + 34, // 2: google.cloud.config.bpmetadata.ResourceTypeMeta.labels:type_name -> google.cloud.config.bpmetadata.ResourceTypeMeta.LabelsEntry + 35, // 3: google.cloud.config.bpmetadata.ResourceTypeMeta.annotations:type_name -> google.cloud.config.bpmetadata.ResourceTypeMeta.AnnotationsEntry + 5, // 4: google.cloud.config.bpmetadata.BlueprintMetadataSpec.info:type_name -> google.cloud.config.bpmetadata.BlueprintInfo + 6, // 5: google.cloud.config.bpmetadata.BlueprintMetadataSpec.content:type_name -> google.cloud.config.bpmetadata.BlueprintContent + 7, // 6: google.cloud.config.bpmetadata.BlueprintMetadataSpec.interfaces:type_name -> google.cloud.config.bpmetadata.BlueprintInterface + 8, // 7: google.cloud.config.bpmetadata.BlueprintMetadataSpec.requirements:type_name -> google.cloud.config.bpmetadata.BlueprintRequirements + 10, // 8: google.cloud.config.bpmetadata.BlueprintMetadataSpec.ui:type_name -> google.cloud.config.bpmetadata.BlueprintUI + 11, // 9: google.cloud.config.bpmetadata.BlueprintInfo.source:type_name -> google.cloud.config.bpmetadata.BlueprintRepoDetail + 12, // 10: google.cloud.config.bpmetadata.BlueprintInfo.actuation_tool:type_name -> google.cloud.config.bpmetadata.BlueprintActuationTool + 13, // 
11: google.cloud.config.bpmetadata.BlueprintInfo.description:type_name -> google.cloud.config.bpmetadata.BlueprintDescription + 14, // 12: google.cloud.config.bpmetadata.BlueprintInfo.deployment_duration:type_name -> google.cloud.config.bpmetadata.BlueprintTimeEstimate + 15, // 13: google.cloud.config.bpmetadata.BlueprintInfo.cost_estimate:type_name -> google.cloud.config.bpmetadata.BlueprintCostEstimate + 16, // 14: google.cloud.config.bpmetadata.BlueprintInfo.cloud_products:type_name -> google.cloud.config.bpmetadata.BlueprintCloudProduct + 18, // 15: google.cloud.config.bpmetadata.BlueprintInfo.quota_details:type_name -> google.cloud.config.bpmetadata.BlueprintQuotaDetail + 19, // 16: google.cloud.config.bpmetadata.BlueprintInfo.author:type_name -> google.cloud.config.bpmetadata.BlueprintAuthor + 20, // 17: google.cloud.config.bpmetadata.BlueprintInfo.software_groups:type_name -> google.cloud.config.bpmetadata.BlueprintSoftwareGroup + 22, // 18: google.cloud.config.bpmetadata.BlueprintInfo.support_info:type_name -> google.cloud.config.bpmetadata.BlueprintSupport + 17, // 19: google.cloud.config.bpmetadata.BlueprintInfo.org_policy_checks:type_name -> google.cloud.config.bpmetadata.BlueprintOrgPolicyCheck + 23, // 20: google.cloud.config.bpmetadata.BlueprintContent.architecture:type_name -> google.cloud.config.bpmetadata.BlueprintArchitecture + 25, // 21: google.cloud.config.bpmetadata.BlueprintContent.diagrams:type_name -> google.cloud.config.bpmetadata.BlueprintDiagram + 26, // 22: google.cloud.config.bpmetadata.BlueprintContent.documentation:type_name -> google.cloud.config.bpmetadata.BlueprintListContent + 24, // 23: google.cloud.config.bpmetadata.BlueprintContent.sub_blueprints:type_name -> google.cloud.config.bpmetadata.BlueprintMiscContent + 24, // 24: google.cloud.config.bpmetadata.BlueprintContent.examples:type_name -> google.cloud.config.bpmetadata.BlueprintMiscContent + 27, // 25: google.cloud.config.bpmetadata.BlueprintInterface.variables:type_name -> 
google.cloud.config.bpmetadata.BlueprintVariable + 31, // 26: google.cloud.config.bpmetadata.BlueprintInterface.variable_groups:type_name -> google.cloud.config.bpmetadata.BlueprintVariableGroup + 32, // 27: google.cloud.config.bpmetadata.BlueprintInterface.outputs:type_name -> google.cloud.config.bpmetadata.BlueprintOutput + 33, // 28: google.cloud.config.bpmetadata.BlueprintRequirements.roles:type_name -> google.cloud.config.bpmetadata.BlueprintRoles + 9, // 29: google.cloud.config.bpmetadata.BlueprintRequirements.provider_versions:type_name -> google.cloud.config.bpmetadata.ProviderVersion + 37, // 30: google.cloud.config.bpmetadata.BlueprintUI.input:type_name -> google.cloud.config.bpmetadata.BlueprintUIInput + 38, // 31: google.cloud.config.bpmetadata.BlueprintUI.runtime:type_name -> google.cloud.config.bpmetadata.BlueprintUIOutput + 0, // 32: google.cloud.config.bpmetadata.BlueprintQuotaDetail.resource_type:type_name -> google.cloud.config.bpmetadata.QuotaResourceType + 36, // 33: google.cloud.config.bpmetadata.BlueprintQuotaDetail.quota_type:type_name -> google.cloud.config.bpmetadata.BlueprintQuotaDetail.QuotaTypeEntry + 1, // 34: google.cloud.config.bpmetadata.BlueprintSoftwareGroup.type:type_name -> google.cloud.config.bpmetadata.SoftwareGroupType + 21, // 35: google.cloud.config.bpmetadata.BlueprintSoftwareGroup.software:type_name -> google.cloud.config.bpmetadata.BlueprintSoftware + 39, // 36: google.cloud.config.bpmetadata.BlueprintVariable.default_value:type_name -> google.protobuf.Value + 28, // 37: google.cloud.config.bpmetadata.BlueprintVariable.connections:type_name -> google.cloud.config.bpmetadata.BlueprintConnection + 29, // 38: google.cloud.config.bpmetadata.BlueprintConnection.source:type_name -> google.cloud.config.bpmetadata.ConnectionSource + 30, // 39: google.cloud.config.bpmetadata.BlueprintConnection.spec:type_name -> google.cloud.config.bpmetadata.ConnectionSpec + 39, // 40: google.cloud.config.bpmetadata.BlueprintOutput.type:type_name 
-> google.protobuf.Value + 41, // [41:41] is the sub-list for method output_type + 41, // [41:41] is the sub-list for method input_type + 41, // [41:41] is the sub-list for extension type_name + 41, // [41:41] is the sub-list for extension extendee + 0, // [0:41] is the sub-list for field type_name +} + +func init() { file_bpmetadata_proto_init() } +func file_bpmetadata_proto_init() { + if File_bpmetadata_proto != nil { + return + } + file_bpmetadata_ui_proto_init() + if !protoimpl.UnsafeEnabled { + file_bpmetadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceTypeMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintMetadataSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintContent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintInterface); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintRequirements); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProviderVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintUI); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintRepoDetail); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintActuationTool); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintDescription); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintTimeEstimate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*BlueprintCostEstimate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintCloudProduct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintOrgPolicyCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintQuotaDetail); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintAuthor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintSoftwareGroup); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintSoftware); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintSupport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_bpmetadata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintArchitecture); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintMiscContent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintDiagram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintListContent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintVariable); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintConnection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectionSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectionSpec); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintVariableGroup); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintRoles); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_bpmetadata_proto_msgTypes[28].OneofWrappers = []interface{}{} + file_bpmetadata_proto_msgTypes[30].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_bpmetadata_proto_rawDesc, + NumEnums: 2, + NumMessages: 35, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_bpmetadata_proto_goTypes, + DependencyIndexes: file_bpmetadata_proto_depIdxs, + EnumInfos: file_bpmetadata_proto_enumTypes, + MessageInfos: file_bpmetadata_proto_msgTypes, + }.Build() + File_bpmetadata_proto = out.File + file_bpmetadata_proto_rawDesc = nil + file_bpmetadata_proto_goTypes = nil + file_bpmetadata_proto_depIdxs = nil +} diff --git a/cli/bpmetadata/bpmetadata_ui.pb.go b/cli/bpmetadata/bpmetadata_ui.pb.go new file mode 100644 index 00000000000..fc1bcd185d2 --- /dev/null +++ b/cli/bpmetadata/bpmetadata_ui.pb.go @@ -0,0 +1,1538 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: bpmetadata_ui.proto + +package bpmetadata + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Type of the alternate default. +type DisplayVariable_AlternateDefault_AlternateType int32 + +const ( + // Default + DisplayVariable_AlternateDefault_ALTERNATE_TYPE_UNSPECIFIED DisplayVariable_AlternateDefault_AlternateType = 0 + // A more secure default. + DisplayVariable_AlternateDefault_ALTERNATE_TYPE_SECURITY DisplayVariable_AlternateDefault_AlternateType = 1 + // A default specifically needed for Design center. + DisplayVariable_AlternateDefault_ALTERNATE_TYPE_DC DisplayVariable_AlternateDefault_AlternateType = 2 +) + +// Enum value maps for DisplayVariable_AlternateDefault_AlternateType. 
+var ( + DisplayVariable_AlternateDefault_AlternateType_name = map[int32]string{ + 0: "ALTERNATE_TYPE_UNSPECIFIED", + 1: "ALTERNATE_TYPE_SECURITY", + 2: "ALTERNATE_TYPE_DC", + } + DisplayVariable_AlternateDefault_AlternateType_value = map[string]int32{ + "ALTERNATE_TYPE_UNSPECIFIED": 0, + "ALTERNATE_TYPE_SECURITY": 1, + "ALTERNATE_TYPE_DC": 2, + } +) + +func (x DisplayVariable_AlternateDefault_AlternateType) Enum() *DisplayVariable_AlternateDefault_AlternateType { + p := new(DisplayVariable_AlternateDefault_AlternateType) + *p = x + return p +} + +func (x DisplayVariable_AlternateDefault_AlternateType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DisplayVariable_AlternateDefault_AlternateType) Descriptor() protoreflect.EnumDescriptor { + return file_bpmetadata_ui_proto_enumTypes[0].Descriptor() +} + +func (DisplayVariable_AlternateDefault_AlternateType) Type() protoreflect.EnumType { + return &file_bpmetadata_ui_proto_enumTypes[0] +} + +func (x DisplayVariable_AlternateDefault_AlternateType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DisplayVariable_AlternateDefault_AlternateType.Descriptor instead. +func (DisplayVariable_AlternateDefault_AlternateType) EnumDescriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{1, 0, 0} +} + +type DisplayVariableToggle_ToggleType int32 + +const ( + // Default + DisplayVariableToggle_DISPLAY_VARIABLE_TOGGLE_TYPE_UNSPECIFIED DisplayVariableToggle_ToggleType = 0 + // Boolean + DisplayVariableToggle_DISPLAY_VARIABLE_TOGGLE_TYPE_BOOLEAN DisplayVariableToggle_ToggleType = 1 + // String + DisplayVariableToggle_DISPLAY_VARIABLE_TOGGLE_TYPE_STRING DisplayVariableToggle_ToggleType = 2 + // Integer + DisplayVariableToggle_DISPLAY_VARIABLE_TOGGLE_TYPE_INTEGER DisplayVariableToggle_ToggleType = 3 +) + +// Enum value maps for DisplayVariableToggle_ToggleType. 
+var ( + DisplayVariableToggle_ToggleType_name = map[int32]string{ + 0: "DISPLAY_VARIABLE_TOGGLE_TYPE_UNSPECIFIED", + 1: "DISPLAY_VARIABLE_TOGGLE_TYPE_BOOLEAN", + 2: "DISPLAY_VARIABLE_TOGGLE_TYPE_STRING", + 3: "DISPLAY_VARIABLE_TOGGLE_TYPE_INTEGER", + } + DisplayVariableToggle_ToggleType_value = map[string]int32{ + "DISPLAY_VARIABLE_TOGGLE_TYPE_UNSPECIFIED": 0, + "DISPLAY_VARIABLE_TOGGLE_TYPE_BOOLEAN": 1, + "DISPLAY_VARIABLE_TOGGLE_TYPE_STRING": 2, + "DISPLAY_VARIABLE_TOGGLE_TYPE_INTEGER": 3, + } +) + +func (x DisplayVariableToggle_ToggleType) Enum() *DisplayVariableToggle_ToggleType { + p := new(DisplayVariableToggle_ToggleType) + *p = x + return p +} + +func (x DisplayVariableToggle_ToggleType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DisplayVariableToggle_ToggleType) Descriptor() protoreflect.EnumDescriptor { + return file_bpmetadata_ui_proto_enumTypes[1].Descriptor() +} + +func (DisplayVariableToggle_ToggleType) Type() protoreflect.EnumType { + return &file_bpmetadata_ui_proto_enumTypes[1] +} + +func (x DisplayVariableToggle_ToggleType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DisplayVariableToggle_ToggleType.Descriptor instead. +func (DisplayVariableToggle_ToggleType) EnumDescriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{2, 0} +} + +// Visibility defines how the output is exposed. +// Gen: manually-authored. +type DisplayOutput_Visibility int32 + +const ( + // Default + DisplayOutput_VISIBILITY_UNSPECIFIED DisplayOutput_Visibility = 0 + // Expose output as root module output. + DisplayOutput_VISIBILITY_ROOT DisplayOutput_Visibility = 1 +) + +// Enum value maps for DisplayOutput_Visibility. 
+var ( + DisplayOutput_Visibility_name = map[int32]string{ + 0: "VISIBILITY_UNSPECIFIED", + 1: "VISIBILITY_ROOT", + } + DisplayOutput_Visibility_value = map[string]int32{ + "VISIBILITY_UNSPECIFIED": 0, + "VISIBILITY_ROOT": 1, + } +) + +func (x DisplayOutput_Visibility) Enum() *DisplayOutput_Visibility { + p := new(DisplayOutput_Visibility) + *p = x + return p +} + +func (x DisplayOutput_Visibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DisplayOutput_Visibility) Descriptor() protoreflect.EnumDescriptor { + return file_bpmetadata_ui_proto_enumTypes[2].Descriptor() +} + +func (DisplayOutput_Visibility) Type() protoreflect.EnumType { + return &file_bpmetadata_ui_proto_enumTypes[2] +} + +func (x DisplayOutput_Visibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DisplayOutput_Visibility.Descriptor instead. +func (DisplayOutput_Visibility) EnumDescriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{8, 0} +} + +// BlueprintUIInput is the structure for holding Input and Input Section (i.e. groups) specific metadata. +type BlueprintUIInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // variables is a map defining all inputs on the UI. + // Gen: partial + Variables map[string]*DisplayVariable `protobuf:"bytes,1,rep,name=variables,proto3" json:"variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" yaml:"variables,omitempty"` // @gotags: json:"variables,omitempty" yaml:"variables,omitempty" + // Sections is a generic structure for grouping inputs together. 
+ // Gen: manually-authored + Sections []*DisplaySection `protobuf:"bytes,2,rep,name=sections,proto3" json:"sections,omitempty" yaml:"sections,omitempty"` // @gotags: json:"sections,omitempty" yaml:"sections,omitempty" + // List of boolean groups that will be referenced by properties. + // Gen: manually-authored + BooleanGroups []*BooleanGroup `protobuf:"bytes,3,rep,name=boolean_groups,json=booleanGroups,proto3" json:"booleanGroups,omitempty" yaml:"booleanGroups,omitempty"` // @gotags: json:"booleanGroups,omitempty" yaml:"booleanGroups,omitempty" +} + +func (x *BlueprintUIInput) Reset() { + *x = BlueprintUIInput{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintUIInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintUIInput) ProtoMessage() {} + +func (x *BlueprintUIInput) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintUIInput.ProtoReflect.Descriptor instead. +func (*BlueprintUIInput) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{0} +} + +func (x *BlueprintUIInput) GetVariables() map[string]*DisplayVariable { + if x != nil { + return x.Variables + } + return nil +} + +func (x *BlueprintUIInput) GetSections() []*DisplaySection { + if x != nil { + return x.Sections + } + return nil +} + +func (x *BlueprintUIInput) GetBooleanGroups() []*BooleanGroup { + if x != nil { + return x.BooleanGroups + } + return nil +} + +// Additional display specific metadata pertaining to a particular +// input variable. 
+type DisplayVariable struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The variable name from the corresponding standard metadata file. + // Gen: auto-generated - the Terraform variable name + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` // @gotags: json:"name" yaml:"name" + // Visible title for the variable on the UI. If not present, + // Name will be used for the Title. + // Gen: auto-generated - the Terraform variable converted to title case e.g. + // variable "bucket_admins" will convert to "Bucket Admins" as the title. + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title" yaml:"title"` // @gotags: json:"title" yaml:"title" + // A flag to hide or show the variable on the UI. + // Gen: manually-authored + Invisible bool `protobuf:"varint,3,opt,name=invisible,proto3" json:"invisible,omitempty" yaml:"invisible,omitempty"` // @gotags: json:"invisible,omitempty" yaml:"invisible,omitempty" + // Variable tooltip. + // Gen: manually-authored + Tooltip string `protobuf:"bytes,4,opt,name=tooltip,proto3" json:"tooltip,omitempty" yaml:"tooltip,omitempty"` // @gotags: json:"tooltip,omitempty" yaml:"tooltip,omitempty" + // Placeholder text (when there is no default). + // Gen: manually-authored + Placeholder string `protobuf:"bytes,5,opt,name=placeholder,proto3" json:"placeholder,omitempty" yaml:"placeholder,omitempty"` // @gotags: json:"placeholder,omitempty" yaml:"placeholder,omitempty" + // Regex based validation rules for the variable. + // Gen: manually-authored + RegexValidation string `protobuf:"bytes,6,opt,name=regex_validation,json=regexValidation,proto3" json:"regexValidation,omitempty" yaml:"regexValidation,omitempty"` // @gotags: json:"regexValidation,omitempty" yaml:"regexValidation,omitempty" + // Minimum no. of inputs for the input variable. 
+ // Gen: manually-authored + MinItems int32 `protobuf:"varint,7,opt,name=min_items,json=minItems,proto3" json:"minItems,omitempty" yaml:"minItems,omitempty"` // @gotags: json:"minItems,omitempty" yaml:"minItems,omitempty" + // Max no. of inputs for the input variable. + // Gen: manually-authored + MaxItems int32 `protobuf:"varint,8,opt,name=max_items,json=maxItems,proto3" json:"maxItems,omitempty" yaml:"maxItems,omitempty"` // @gotags: json:"maxItems,omitempty" yaml:"maxItems,omitempty" + // Minimum length for string values. + // Gen: manually-authored + MinLength int32 `protobuf:"varint,9,opt,name=min_length,json=minLength,proto3" json:"minLength,omitempty" yaml:"minLength,omitempty"` // @gotags: json:"minLength,omitempty" yaml:"minLength,omitempty" + // Max length for string values. + // Gen: manually-authored + MaxLength int32 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"maxLength,omitempty" yaml:"maxLength,omitempty"` // @gotags: json:"maxLength,omitempty" yaml:"maxLength,omitempty" + // Minimum value for numeric types. + // Gen: manually-authored + Min float32 `protobuf:"fixed32,11,opt,name=min,proto3" json:"min,omitempty" yaml:"min,omitempty"` // @gotags: json:"min,omitempty" yaml:"min,omitempty" + // Max value for numeric types. + // Gen: manually-authored + Max float32 `protobuf:"fixed32,12,opt,name=max,proto3" json:"max,omitempty" yaml:"max,omitempty"` // @gotags: json:"max,omitempty" yaml:"max,omitempty" + // The name of a section to which this variable belongs. + // variables belong to the root section if this field is + // not set. + // Gen: manually-authored + Section string `protobuf:"bytes,13,opt,name=section,proto3" json:"section,omitempty" yaml:"section,omitempty"` // @gotags: json:"section,omitempty" yaml:"section,omitempty" + // UI extension associated with the input variable. + // E.g. 
for rendering a GCE machine type selector: + // + // xGoogleProperty: + // + // type: GCE_MACHINE_TYPE + // zoneProperty: myZone + // gceMachineType: + // minCpu: 2 + // minRamGb: + // + // Gen: manually-authored + XGoogleProperty *GooglePropertyExtension `protobuf:"bytes,14,opt,name=x_google_property,json=xGoogleProperty,proto3" json:"xGoogleProperty,omitempty" yaml:"xGoogleProperty,omitempty"` // @gotags: json:"xGoogleProperty,omitempty" yaml:"xGoogleProperty,omitempty" + // Text describing the validation rules for the property. Typically shown + // after an invalid input. + // Optional. UTF-8 text. No markup. At most 128 characters. + // Gen: manually-authored + Validation string `protobuf:"bytes,15,opt,name=validation,proto3" json:"validation,omitempty" yaml:"validation,omitempty"` // @gotags: json:"validation,omitempty" yaml:"validation,omitempty" + // Property subtext, displayed below the title. + // Gen: manually-authored + Subtext string `protobuf:"bytes,16,opt,name=subtext,proto3" json:"subtext,omitempty" yaml:"subtext,omitempty"` // @gotags: json:"subtext,omitempty" yaml:"subtext,omitempty" + // Labels for enum values. + // Values must be UTF-8 text with no markup, and at most 64 characters. + // Gen: manually-authored + EnumValueLabels []*ValueLabel `protobuf:"bytes,17,rep,name=enum_value_labels,json=enumValueLabels,proto3" json:"enumValueLabels,omitempty" yaml:"enumValueLabels,omitempty"` // @gotags: json:"enumValueLabels,omitempty" yaml:"enumValueLabels,omitempty" + // Indicates the "advanced" level of the input property. Level 0 (default) + // will always be shown. Level 1 corresponds to one expansion (user clicks + // "show advanced options" or "more options"). Higher levels correspond to + // further expansions, or they may be collapsed to level 1 by the UI + // implementation. + // Optional. 
+ // Gen: manually-authored + Level int32 `protobuf:"varint,18,opt,name=level,proto3" json:"level,omitempty" yaml:"level,omitempty"` // @gotags: json:"level,omitempty" yaml:"level,omitempty" + // The name of a boolean group from Input.booleanGroups to which this + // property belongs. Only allowed for properties declared as type boolean in + // the schema. Properties in a boolean group must be adjacent in the + // properties list and must belong to the same section (if any). + // Optional. + // Gen: manually-authored + BooleanGroup string `protobuf:"bytes,20,opt,name=boolean_group,json=booleanGroup,proto3" json:"booleanGroup,omitempty" yaml:"booleanGroup,omitempty"` // @gotags: json:"booleanGroup,omitempty" yaml:"booleanGroup,omitempty" + AltDefaults []*DisplayVariable_AlternateDefault `protobuf:"bytes,21,rep,name=alt_defaults,json=altDefaults,proto3" json:"altDefaults,omitempty" yaml:"altDefaults,omitempty"` // @gotags: json:"altDefaults,omitempty" yaml:"altDefaults,omitempty" + ToggleUsingVariables []*DisplayVariableToggle `protobuf:"bytes,22,rep,name=toggle_using_variables,json=toggleUsingVariables,proto3" json:"toggleUsingVariables,omitempty" yaml:"toggleUsingVariables,omitempty"` // @gotags: json:"toggleUsingVariables,omitempty" yaml:"toggleUsingVariables,omitempty" +} + +func (x *DisplayVariable) Reset() { + *x = DisplayVariable{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DisplayVariable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisplayVariable) ProtoMessage() {} + +func (x *DisplayVariable) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use DisplayVariable.ProtoReflect.Descriptor instead. +func (*DisplayVariable) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{1} +} + +func (x *DisplayVariable) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DisplayVariable) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *DisplayVariable) GetInvisible() bool { + if x != nil { + return x.Invisible + } + return false +} + +func (x *DisplayVariable) GetTooltip() string { + if x != nil { + return x.Tooltip + } + return "" +} + +func (x *DisplayVariable) GetPlaceholder() string { + if x != nil { + return x.Placeholder + } + return "" +} + +func (x *DisplayVariable) GetRegexValidation() string { + if x != nil { + return x.RegexValidation + } + return "" +} + +func (x *DisplayVariable) GetMinItems() int32 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *DisplayVariable) GetMaxItems() int32 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *DisplayVariable) GetMinLength() int32 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *DisplayVariable) GetMaxLength() int32 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *DisplayVariable) GetMin() float32 { + if x != nil { + return x.Min + } + return 0 +} + +func (x *DisplayVariable) GetMax() float32 { + if x != nil { + return x.Max + } + return 0 +} + +func (x *DisplayVariable) GetSection() string { + if x != nil { + return x.Section + } + return "" +} + +func (x *DisplayVariable) GetXGoogleProperty() *GooglePropertyExtension { + if x != nil { + return x.XGoogleProperty + } + return nil +} + +func (x *DisplayVariable) GetValidation() string { + if x != nil { + return x.Validation + } + return "" +} + +func (x *DisplayVariable) GetSubtext() string { + if x != nil { + return x.Subtext + } + return "" +} + +func (x *DisplayVariable) GetEnumValueLabels() []*ValueLabel { + if x != nil { + 
return x.EnumValueLabels + } + return nil +} + +func (x *DisplayVariable) GetLevel() int32 { + if x != nil { + return x.Level + } + return 0 +} + +func (x *DisplayVariable) GetBooleanGroup() string { + if x != nil { + return x.BooleanGroup + } + return "" +} + +func (x *DisplayVariable) GetAltDefaults() []*DisplayVariable_AlternateDefault { + if x != nil { + return x.AltDefaults + } + return nil +} + +func (x *DisplayVariable) GetToggleUsingVariables() []*DisplayVariableToggle { + if x != nil { + return x.ToggleUsingVariables + } + return nil +} + +type DisplayVariableToggle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the variable used to toggle the display of another variable. + VariableName string `protobuf:"bytes,1,opt,name=variable_name,json=variableName,proto3" json:"variableName,omitempty" yaml:"variableName,omitempty"` // @gotags: json:"variableName,omitempty" yaml:"variableName,omitempty" + // The value of the variable used to toggle the display of another variable. + VariableValues []string `protobuf:"bytes,2,rep,name=variable_values,json=variableValues,proto3" json:"variableValues,omitempty" yaml:"variableValue,omitempty"` // @gotags: json:"variableValues,omitempty" yaml:"variableValue,omitempty" + // The type of the variable used to toggle the display of another variable. 
+ Type DisplayVariableToggle_ToggleType `protobuf:"varint,3,opt,name=type,proto3,enum=google.cloud.config.bpmetadata.DisplayVariableToggle_ToggleType" json:"type,omitempty" yaml:"type,omitempty"` // @gotags: json:"type,omitempty" yaml:"type,omitempty" +} + +func (x *DisplayVariableToggle) Reset() { + *x = DisplayVariableToggle{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DisplayVariableToggle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisplayVariableToggle) ProtoMessage() {} + +func (x *DisplayVariableToggle) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DisplayVariableToggle.ProtoReflect.Descriptor instead. 
+func (*DisplayVariableToggle) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{2} +} + +func (x *DisplayVariableToggle) GetVariableName() string { + if x != nil { + return x.VariableName + } + return "" +} + +func (x *DisplayVariableToggle) GetVariableValues() []string { + if x != nil { + return x.VariableValues + } + return nil +} + +func (x *DisplayVariableToggle) GetType() DisplayVariableToggle_ToggleType { + if x != nil { + return x.Type + } + return DisplayVariableToggle_DISPLAY_VARIABLE_TOGGLE_TYPE_UNSPECIFIED +} + +type ValueLabel struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty" yaml:"label,omitempty"` // @gotags: json:"label,omitempty" yaml:"label,omitempty" + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty" yaml:"value,omitempty"` // @gotags: json:"value,omitempty" yaml:"value,omitempty" +} + +func (x *ValueLabel) Reset() { + *x = ValueLabel{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValueLabel) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValueLabel) ProtoMessage() {} + +func (x *ValueLabel) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValueLabel.ProtoReflect.Descriptor instead. 
+func (*ValueLabel) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{3} +} + +func (x *ValueLabel) GetLabel() string { + if x != nil { + return x.Label + } + return "" +} + +func (x *ValueLabel) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// A logical group of variables. [Section][]s may also be grouped into +// sub-sections. +type DisplaySection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the section, referenced by DisplayVariable.Section + // Section names must be unique. + // Gen: manually-authored + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` // @gotags: json:"name" yaml:"name" + // Section title. + // If not provided, name will be used instead. + // Gen: manually-authored + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty" yaml:"title,omitempty"` // @gotags: json:"title,omitempty" yaml:"title,omitempty" + // Section tooltip. + // Gen: manually-authored + Tooltip string `protobuf:"bytes,3,opt,name=tooltip,proto3" json:"tooltip,omitempty" yaml:"tooltip,omitempty"` // @gotags: json:"tooltip,omitempty" yaml:"tooltip,omitempty" + // Section subtext. + // Gen: manually-authored + Subtext string `protobuf:"bytes,4,opt,name=subtext,proto3" json:"subtext,omitempty" yaml:"subtext,omitempty"` // @gotags: json:"subtext,omitempty" yaml:"subtext,omitempty" + // The name of the parent section (if parent is not the root section). 
+ // Gen: manually-authored + Parent string `protobuf:"bytes,5,opt,name=parent,proto3" json:"parent,omitempty" yaml:"parent,omitempty"` // @gotags: json:"parent,omitempty" yaml:"parent,omitempty" +} + +func (x *DisplaySection) Reset() { + *x = DisplaySection{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DisplaySection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisplaySection) ProtoMessage() {} + +func (x *DisplaySection) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DisplaySection.ProtoReflect.Descriptor instead. +func (*DisplaySection) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{4} +} + +func (x *DisplaySection) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DisplaySection) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *DisplaySection) GetTooltip() string { + if x != nil { + return x.Tooltip + } + return "" +} + +func (x *DisplaySection) GetSubtext() string { + if x != nil { + return x.Subtext + } + return "" +} + +func (x *DisplaySection) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +// Groups a list of boolean properties into one logical property for the +// purposes of the configuration form. The title of a [BooleanGroup][] has the +// same styling as the title of an ordinary property, and individual properties +// in the group will be packed more tightly together to indicate their +// association. Child of [Input][]. 
+type BooleanGroup struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the group, referenced by [Property][] + // .booleanGroup. + // BooleanGroup names must be unique. Required. + // Gen: manually-authored + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` // @gotags: json:"name" yaml:"name" + // Group title. + // Required. UTF-8 text. No markup. At most 64 characters. + // Gen: manually-authored + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title" yaml:"title"` // @gotags: json:"title" yaml:"title" + // Group tooltip. + // Optional. HTML (<a href> tags only). At most 256 + // characters. + // Gen: manually-authored + Tooltip string `protobuf:"bytes,3,opt,name=tooltip,proto3" json:"tooltip,omitempty" yaml:"tooltip,omitempty"` // @gotags: json:"tooltip,omitempty" yaml:"tooltip,omitempty" + // Group subtext. + // Optional. HTML (<a href> tags only). At most 256 + // characters. + // Gen: manually-authored + Subtext string `protobuf:"bytes,4,opt,name=subtext,proto3" json:"subtext,omitempty" yaml:"subtext,omitempty"` // @gotags: json:"subtext,omitempty" yaml:"subtext,omitempty" +} + +func (x *BooleanGroup) Reset() { + *x = BooleanGroup{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BooleanGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BooleanGroup) ProtoMessage() {} + +func (x *BooleanGroup) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BooleanGroup.ProtoReflect.Descriptor instead. 
+func (*BooleanGroup) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{5} +} + +func (x *BooleanGroup) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *BooleanGroup) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *BooleanGroup) GetTooltip() string { + if x != nil { + return x.Tooltip + } + return "" +} + +func (x *BooleanGroup) GetSubtext() string { + if x != nil { + return x.Subtext + } + return "" +} + +type BlueprintUIOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Short message to be displayed while the blueprint is deploying. + // At most 128 characters. + // Gen: manually-authored + OutputMessage string `protobuf:"bytes,1,opt,name=output_message,json=outputMessage,proto3" json:"outputMessage,omitempty" yaml:"outputMessage,omitempty"` // @gotags: json:"outputMessage,omitempty" yaml:"outputMessage,omitempty" + // List of suggested actions to take. + // Gen: manually-authored + SuggestedActions []*UIActionItem `protobuf:"bytes,2,rep,name=suggested_actions,json=suggestedActions,proto3" json:"suggestedActions,omitempty" yaml:"suggestedActions,omitempty"` // @gotags: json:"suggestedActions,omitempty" yaml:"suggestedActions,omitempty" + // outputs is a map defining a subset of Terraform outputs on the UI + // that may need additional UI configuration. 
+ // Gen: manually-authored + Outputs map[string]*DisplayOutput `protobuf:"bytes,3,rep,name=outputs,proto3" json:"outputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" yaml:"outputs,omitempty"` // @gotags: json:"outputs,omitempty" yaml:"outputs,omitempty" +} + +func (x *BlueprintUIOutput) Reset() { + *x = BlueprintUIOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlueprintUIOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlueprintUIOutput) ProtoMessage() {} + +func (x *BlueprintUIOutput) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlueprintUIOutput.ProtoReflect.Descriptor instead. +func (*BlueprintUIOutput) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{6} +} + +func (x *BlueprintUIOutput) GetOutputMessage() string { + if x != nil { + return x.OutputMessage + } + return "" +} + +func (x *BlueprintUIOutput) GetSuggestedActions() []*UIActionItem { + if x != nil { + return x.SuggestedActions + } + return nil +} + +func (x *BlueprintUIOutput) GetOutputs() map[string]*DisplayOutput { + if x != nil { + return x.Outputs + } + return nil +} + +// An item appearing in a list of required or suggested steps. +type UIActionItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Summary heading for the item. + // Required. Accepts string expressions. At most 64 characters. 
+ // Gen: manually-authored + Heading string `protobuf:"bytes,1,opt,name=heading,proto3" json:"heading" yaml:"heading"` // @gotags: json:"heading" yaml:"heading" + // Longer description of the item. + // At least one description or snippet is required. + // Accepts string expressions. HTML <a href> + // tags only. At most 512 characters. + // Gen: manually-authored + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty" yaml:"description,omitempty"` // @gotags: json:"description,omitempty" yaml:"description,omitempty" + // Fixed-width formatted code snippet. + // At least one description or snippet is required. + // Accepts string expressions. UTF-8 text. At most 512 characters. + // Gen: manually-authored + Snippet string `protobuf:"bytes,3,opt,name=snippet,proto3" json:"snippet,omitempty" yaml:"snippet,omitempty"` // @gotags: json:"snippet,omitempty" yaml:"snippet,omitempty" + // If present, this expression determines whether the item is shown. + // Should be in the form of a Boolean expression e.g. outputs.hasExternalIP + // where `externalIP` is the output. 
+ // Gen: manually-authored + ShowIf string `protobuf:"bytes,4,opt,name=show_if,json=showIf,proto3" json:"showIf,omitempty" yaml:"showIf,omitempty"` // @gotags: json:"showIf,omitempty" yaml:"showIf,omitempty" +} + +func (x *UIActionItem) Reset() { + *x = UIActionItem{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UIActionItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UIActionItem) ProtoMessage() {} + +func (x *UIActionItem) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UIActionItem.ProtoReflect.Descriptor instead. +func (*UIActionItem) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{7} +} + +func (x *UIActionItem) GetHeading() string { + if x != nil { + return x.Heading + } + return "" +} + +func (x *UIActionItem) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *UIActionItem) GetSnippet() string { + if x != nil { + return x.Snippet + } + return "" +} + +func (x *UIActionItem) GetShowIf() string { + if x != nil { + return x.ShowIf + } + return "" +} + +// Additional display specific metadata pertaining to a particular +// Terraform output. Only applicable for Outputs that are URLs. +type DisplayOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // open_in_new_tab defines if the Output action should be opened + // in a new tab. 
+ // Gen: manually-authored + OpenInNewTab bool `protobuf:"varint,1,opt,name=open_in_new_tab,json=openInNewTab,proto3" json:"openInNewTab,omitempty" yaml:"openInNewTab,omitempty"` // @gotags: json:"openInNewTab,omitempty" yaml:"openInNewTab,omitempty" + // show_in_notification defines if the Output should shown in + // notification for the deployment. + // Gen: manually-authored + ShowInNotification bool `protobuf:"varint,2,opt,name=show_in_notification,json=showInNotification,proto3" json:"showInNotification,omitempty" yaml:"showInNotification,omitempty"` // @gotags: json:"showInNotification,omitempty" yaml:"showInNotification,omitempty" + // label to display on the Output action button + // Gen: manually-authored + Label string `protobuf:"bytes,3,opt,name=label,proto3" json:"label,omitempty" yaml:"label,omitEmpty"` // @gotags: json:"label,omitempty" yaml:"label,omitEmpty" + Visibility DisplayOutput_Visibility `protobuf:"varint,4,opt,name=visibility,proto3,enum=google.cloud.config.bpmetadata.DisplayOutput_Visibility" json:"visibility,omitempty" yaml:"visibility,omitEmpty"` // @gotags: json:"visibility,omitempty" yaml:"visibility,omitEmpty" +} + +func (x *DisplayOutput) Reset() { + *x = DisplayOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DisplayOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisplayOutput) ProtoMessage() {} + +func (x *DisplayOutput) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DisplayOutput.ProtoReflect.Descriptor instead. 
+func (*DisplayOutput) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{8} +} + +func (x *DisplayOutput) GetOpenInNewTab() bool { + if x != nil { + return x.OpenInNewTab + } + return false +} + +func (x *DisplayOutput) GetShowInNotification() bool { + if x != nil { + return x.ShowInNotification + } + return false +} + +func (x *DisplayOutput) GetLabel() string { + if x != nil { + return x.Label + } + return "" +} + +func (x *DisplayOutput) GetVisibility() DisplayOutput_Visibility { + if x != nil { + return x.Visibility + } + return DisplayOutput_VISIBILITY_UNSPECIFIED +} + +// Alternate default value. +// This allows authors to define an alternative value for pre identified usecases such as security. +// If specified, this value can be used instead of the default value in BlueprintVariable. +// Gen: manually-authored. +type DisplayVariable_AlternateDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type DisplayVariable_AlternateDefault_AlternateType `protobuf:"varint,1,opt,name=type,proto3,enum=google.cloud.config.bpmetadata.DisplayVariable_AlternateDefault_AlternateType" json:"type,omitempty" yaml:"type,omitempty"` // @gotags: json:"type,omitempty" yaml:"type,omitempty" + // Value of the alternate default. 
+ Value *structpb.Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty" yaml:"value,omitempty"` // @gotags: json:"value,omitempty" yaml:"value,omitempty" +} + +func (x *DisplayVariable_AlternateDefault) Reset() { + *x = DisplayVariable_AlternateDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DisplayVariable_AlternateDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisplayVariable_AlternateDefault) ProtoMessage() {} + +func (x *DisplayVariable_AlternateDefault) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DisplayVariable_AlternateDefault.ProtoReflect.Descriptor instead. 
+func (*DisplayVariable_AlternateDefault) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *DisplayVariable_AlternateDefault) GetType() DisplayVariable_AlternateDefault_AlternateType { + if x != nil { + return x.Type + } + return DisplayVariable_AlternateDefault_ALTERNATE_TYPE_UNSPECIFIED +} + +func (x *DisplayVariable_AlternateDefault) GetValue() *structpb.Value { + if x != nil { + return x.Value + } + return nil +} + +var File_bpmetadata_ui_proto protoreflect.FileDescriptor + +var file_bpmetadata_ui_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x75, 0x69, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x75, 0x69, 0x5f, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x81, 0x03, 0x0a, + 0x10, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x55, 0x49, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x12, 0x5d, 0x0a, 0x09, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x55, + 0x49, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x12, 0x4a, 0x0a, 0x08, 0x73, 0x65, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x53, 0x0a, 0x0e, + 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x52, 0x0d, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x1a, 0x6d, 0x0a, 0x0e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x56, 0x61, 0x72, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x8c, 0x09, 0x0a, 0x0f, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x56, 0x61, 0x72, 0x69, + 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 
0x6c, 0x65, 0x12, 0x1c, + 0x0a, 0x09, 0x69, 0x6e, 0x76, 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x09, 0x69, 0x6e, 0x76, 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x74, 0x6f, 0x6f, 0x6c, 0x74, 0x69, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, + 0x6f, 0x6f, 0x6c, 0x74, 0x69, 0x70, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x6c, 0x61, + 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1d, 0x0a, + 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x6d, + 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, + 0x03, 0x6d, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x63, 0x0a, 0x11, 0x78, 0x5f, 0x67, + 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x78, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1e, + 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x74, 0x65, 0x78, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x62, 0x74, 0x65, 0x78, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x6e, 0x75, 0x6d, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x11, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, + 0x0f, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x12, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, + 0x6e, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, + 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x63, 0x0a, 0x0c, 0x61, + 0x6c, 0x74, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x52, 0x0b, 0x61, 0x6c, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x6b, 0x0a, 0x16, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x75, 0x73, 0x69, 0x6e, 0x67, + 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x52, 0x14, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x55, + 0x73, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x1a, 0x89, 0x02, + 0x0a, 0x10, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x62, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x4e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x63, 0x0a, 0x0d, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, + 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, + 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, 0x59, + 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x54, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x43, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x22, + 0xf5, 0x02, 0x0a, 0x15, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x56, 0x61, 0x72, 0x69, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x72, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, + 0x0a, 0x0f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x56, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x2e, 0x54, 0x6f, 0x67, + 0x67, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xb7, 0x01, + 0x0a, 0x0a, 0x54, 0x6f, 
0x67, 0x67, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2c, 0x0a, 0x28, + 0x44, 0x49, 0x53, 0x50, 0x4c, 0x41, 0x59, 0x5f, 0x56, 0x41, 0x52, 0x49, 0x41, 0x42, 0x4c, 0x45, + 0x5f, 0x54, 0x4f, 0x47, 0x47, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x28, 0x0a, 0x24, 0x44, 0x49, + 0x53, 0x50, 0x4c, 0x41, 0x59, 0x5f, 0x56, 0x41, 0x52, 0x49, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, + 0x4f, 0x47, 0x47, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x45, + 0x41, 0x4e, 0x10, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x49, 0x53, 0x50, 0x4c, 0x41, 0x59, 0x5f, + 0x56, 0x41, 0x52, 0x49, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x4f, 0x47, 0x47, 0x4c, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x28, 0x0a, + 0x24, 0x44, 0x49, 0x53, 0x50, 0x4c, 0x41, 0x59, 0x5f, 0x56, 0x41, 0x52, 0x49, 0x41, 0x42, 0x4c, + 0x45, 0x5f, 0x54, 0x4f, 0x47, 0x47, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, + 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x22, 0x38, 0x0a, 0x0a, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x86, 0x01, 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x74, 0x6f, 0x6f, 0x6c, 0x74, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x74, 0x6f, 0x6f, 0x6c, 0x74, 0x69, 0x70, 0x12, 0x18, 
0x0a, 0x07, 0x73, 0x75, 0x62, 0x74, + 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x74, 0x65, + 0x78, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x6c, 0x0a, 0x0c, 0x42, 0x6f, + 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x6f, 0x6f, 0x6c, 0x74, 0x69, 0x70, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x6f, 0x6f, 0x6c, 0x74, 0x69, 0x70, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x62, 0x74, 0x65, 0x78, 0x74, 0x22, 0xda, 0x02, 0x0a, 0x11, 0x42, 0x6c, 0x75, + 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x55, 0x49, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x25, + 0x0a, 0x0e, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x59, 0x0a, 0x11, 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x55, 0x49, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x10, + 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x58, 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 
0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x55, 0x49, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x1a, 0x69, 0x0a, 0x0c, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x43, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7d, 0x0a, 0x0c, 0x55, 0x49, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x73, + 0x68, 0x6f, 0x77, 0x5f, 0x69, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, + 0x6f, 0x77, 0x49, 0x66, 0x22, 0x97, 0x02, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 
0x12, 0x25, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x69, + 0x6e, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x61, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0c, 0x6f, 0x70, 0x65, 0x6e, 0x49, 0x6e, 0x4e, 0x65, 0x77, 0x54, 0x61, 0x62, 0x12, 0x30, 0x0a, + 0x14, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x69, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x6f, + 0x77, 0x49, 0x6e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x58, 0x0a, 0x0a, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x2e, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x52, 0x0a, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x22, + 0x3d, 0x0a, 0x0a, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x1a, 0x0a, + 0x16, 0x56, 0x49, 0x53, 0x49, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x56, 0x49, 0x53, + 0x49, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x52, 0x4f, 0x4f, 0x54, 0x10, 0x01, 0x42, 0x48, + 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2d, 0x74, 0x6f, 0x6f, 0x6c, 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x6c, 
0x69, 0x2f, 0x62, 0x70, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_bpmetadata_ui_proto_rawDescOnce sync.Once + file_bpmetadata_ui_proto_rawDescData = file_bpmetadata_ui_proto_rawDesc +) + +func file_bpmetadata_ui_proto_rawDescGZIP() []byte { + file_bpmetadata_ui_proto_rawDescOnce.Do(func() { + file_bpmetadata_ui_proto_rawDescData = protoimpl.X.CompressGZIP(file_bpmetadata_ui_proto_rawDescData) + }) + return file_bpmetadata_ui_proto_rawDescData +} + +var file_bpmetadata_ui_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_bpmetadata_ui_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_bpmetadata_ui_proto_goTypes = []interface{}{ + (DisplayVariable_AlternateDefault_AlternateType)(0), // 0: google.cloud.config.bpmetadata.DisplayVariable.AlternateDefault.AlternateType + (DisplayVariableToggle_ToggleType)(0), // 1: google.cloud.config.bpmetadata.DisplayVariableToggle.ToggleType + (DisplayOutput_Visibility)(0), // 2: google.cloud.config.bpmetadata.DisplayOutput.Visibility + (*BlueprintUIInput)(nil), // 3: google.cloud.config.bpmetadata.BlueprintUIInput + (*DisplayVariable)(nil), // 4: google.cloud.config.bpmetadata.DisplayVariable + (*DisplayVariableToggle)(nil), // 5: google.cloud.config.bpmetadata.DisplayVariableToggle + (*ValueLabel)(nil), // 6: google.cloud.config.bpmetadata.ValueLabel + (*DisplaySection)(nil), // 7: google.cloud.config.bpmetadata.DisplaySection + (*BooleanGroup)(nil), // 8: google.cloud.config.bpmetadata.BooleanGroup + (*BlueprintUIOutput)(nil), // 9: google.cloud.config.bpmetadata.BlueprintUIOutput + (*UIActionItem)(nil), // 10: google.cloud.config.bpmetadata.UIActionItem + (*DisplayOutput)(nil), // 11: google.cloud.config.bpmetadata.DisplayOutput + nil, // 12: google.cloud.config.bpmetadata.BlueprintUIInput.VariablesEntry + (*DisplayVariable_AlternateDefault)(nil), // 13: google.cloud.config.bpmetadata.DisplayVariable.AlternateDefault + nil, // 14: 
google.cloud.config.bpmetadata.BlueprintUIOutput.OutputsEntry + (*GooglePropertyExtension)(nil), // 15: google.cloud.config.bpmetadata.GooglePropertyExtension + (*structpb.Value)(nil), // 16: google.protobuf.Value +} +var file_bpmetadata_ui_proto_depIdxs = []int32{ + 12, // 0: google.cloud.config.bpmetadata.BlueprintUIInput.variables:type_name -> google.cloud.config.bpmetadata.BlueprintUIInput.VariablesEntry + 7, // 1: google.cloud.config.bpmetadata.BlueprintUIInput.sections:type_name -> google.cloud.config.bpmetadata.DisplaySection + 8, // 2: google.cloud.config.bpmetadata.BlueprintUIInput.boolean_groups:type_name -> google.cloud.config.bpmetadata.BooleanGroup + 15, // 3: google.cloud.config.bpmetadata.DisplayVariable.x_google_property:type_name -> google.cloud.config.bpmetadata.GooglePropertyExtension + 6, // 4: google.cloud.config.bpmetadata.DisplayVariable.enum_value_labels:type_name -> google.cloud.config.bpmetadata.ValueLabel + 13, // 5: google.cloud.config.bpmetadata.DisplayVariable.alt_defaults:type_name -> google.cloud.config.bpmetadata.DisplayVariable.AlternateDefault + 5, // 6: google.cloud.config.bpmetadata.DisplayVariable.toggle_using_variables:type_name -> google.cloud.config.bpmetadata.DisplayVariableToggle + 1, // 7: google.cloud.config.bpmetadata.DisplayVariableToggle.type:type_name -> google.cloud.config.bpmetadata.DisplayVariableToggle.ToggleType + 10, // 8: google.cloud.config.bpmetadata.BlueprintUIOutput.suggested_actions:type_name -> google.cloud.config.bpmetadata.UIActionItem + 14, // 9: google.cloud.config.bpmetadata.BlueprintUIOutput.outputs:type_name -> google.cloud.config.bpmetadata.BlueprintUIOutput.OutputsEntry + 2, // 10: google.cloud.config.bpmetadata.DisplayOutput.visibility:type_name -> google.cloud.config.bpmetadata.DisplayOutput.Visibility + 4, // 11: google.cloud.config.bpmetadata.BlueprintUIInput.VariablesEntry.value:type_name -> google.cloud.config.bpmetadata.DisplayVariable + 0, // 12: 
google.cloud.config.bpmetadata.DisplayVariable.AlternateDefault.type:type_name -> google.cloud.config.bpmetadata.DisplayVariable.AlternateDefault.AlternateType + 16, // 13: google.cloud.config.bpmetadata.DisplayVariable.AlternateDefault.value:type_name -> google.protobuf.Value + 11, // 14: google.cloud.config.bpmetadata.BlueprintUIOutput.OutputsEntry.value:type_name -> google.cloud.config.bpmetadata.DisplayOutput + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_bpmetadata_ui_proto_init() } +func file_bpmetadata_ui_proto_init() { + if File_bpmetadata_ui_proto != nil { + return + } + file_bpmetadata_ui_ext_proto_init() + if !protoimpl.UnsafeEnabled { + file_bpmetadata_ui_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintUIInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DisplayVariable); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DisplayVariableToggle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValueLabel); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_proto_msgTypes[4].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*DisplaySection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BooleanGroup); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlueprintUIOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UIActionItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DisplayOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DisplayVariable_AlternateDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_bpmetadata_ui_proto_rawDesc, + NumEnums: 3, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_bpmetadata_ui_proto_goTypes, + DependencyIndexes: file_bpmetadata_ui_proto_depIdxs, + EnumInfos: file_bpmetadata_ui_proto_enumTypes, + MessageInfos: file_bpmetadata_ui_proto_msgTypes, + }.Build() + 
File_bpmetadata_ui_proto = out.File + file_bpmetadata_ui_proto_rawDesc = nil + file_bpmetadata_ui_proto_goTypes = nil + file_bpmetadata_ui_proto_depIdxs = nil +} diff --git a/cli/bpmetadata/bpmetadata_ui_ext.pb.go b/cli/bpmetadata/bpmetadata_ui_ext.pb.go new file mode 100644 index 00000000000..87007d54364 --- /dev/null +++ b/cli/bpmetadata/bpmetadata_ui_ext.pb.go @@ -0,0 +1,1785 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: bpmetadata_ui_ext.proto + +package bpmetadata + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// ExtensionType specifies the type of extension. +type ExtensionType int32 + +const ( + // EXTENSIONTYPE_UNDEFINED indicates that the extension type is undefined. + ExtensionType_ET_UNDEFINED ExtensionType = 0 + // General formats. + ExtensionType_ET_EMAIL_ADDRESS ExtensionType = 1 + ExtensionType_ET_MULTI_LINE_STRING ExtensionType = 2 + ExtensionType_ET_CREATE_RESOURCE ExtensionType = 21 + // GCE related. 
+ ExtensionType_ET_GCE_DISK_IMAGE ExtensionType = 3 + ExtensionType_ET_GCE_DISK_TYPE ExtensionType = 4 + ExtensionType_ET_GCE_DISK_SIZE ExtensionType = 5 + ExtensionType_ET_GCE_MACHINE_TYPE ExtensionType = 6 + ExtensionType_ET_GCE_NETWORK ExtensionType = 7 + ExtensionType_ET_GCE_ZONE ExtensionType = 8 + ExtensionType_ET_GCE_SUBNETWORK ExtensionType = 9 + ExtensionType_ET_GCE_REGION ExtensionType = 10 + ExtensionType_ET_GCE_GPU_TYPE ExtensionType = 11 + ExtensionType_ET_GCE_GPU_COUNT ExtensionType = 12 + ExtensionType_ET_GCE_EXTERNAL_IP ExtensionType = 13 + ExtensionType_ET_GCE_IP_FORWARDING ExtensionType = 14 + ExtensionType_ET_GCE_FIREWALL ExtensionType = 15 + ExtensionType_ET_GCE_FIREWALL_RANGE ExtensionType = 16 + ExtensionType_ET_GCE_GENERIC_RESOURCE ExtensionType = 17 + ExtensionType_ET_GCE_LOCATION ExtensionType = 22 + // GCS related. + ExtensionType_ET_GCS_BUCKET ExtensionType = 18 + // IAM related. + ExtensionType_ET_IAM_SERVICE_ACCOUNT ExtensionType = 19 + // GKE related. + ExtensionType_ET_GKE_CLUSTER ExtensionType = 20 +) + +// Enum value maps for ExtensionType. 
+var ( + ExtensionType_name = map[int32]string{ + 0: "ET_UNDEFINED", + 1: "ET_EMAIL_ADDRESS", + 2: "ET_MULTI_LINE_STRING", + 21: "ET_CREATE_RESOURCE", + 3: "ET_GCE_DISK_IMAGE", + 4: "ET_GCE_DISK_TYPE", + 5: "ET_GCE_DISK_SIZE", + 6: "ET_GCE_MACHINE_TYPE", + 7: "ET_GCE_NETWORK", + 8: "ET_GCE_ZONE", + 9: "ET_GCE_SUBNETWORK", + 10: "ET_GCE_REGION", + 11: "ET_GCE_GPU_TYPE", + 12: "ET_GCE_GPU_COUNT", + 13: "ET_GCE_EXTERNAL_IP", + 14: "ET_GCE_IP_FORWARDING", + 15: "ET_GCE_FIREWALL", + 16: "ET_GCE_FIREWALL_RANGE", + 17: "ET_GCE_GENERIC_RESOURCE", + 22: "ET_GCE_LOCATION", + 18: "ET_GCS_BUCKET", + 19: "ET_IAM_SERVICE_ACCOUNT", + 20: "ET_GKE_CLUSTER", + } + ExtensionType_value = map[string]int32{ + "ET_UNDEFINED": 0, + "ET_EMAIL_ADDRESS": 1, + "ET_MULTI_LINE_STRING": 2, + "ET_CREATE_RESOURCE": 21, + "ET_GCE_DISK_IMAGE": 3, + "ET_GCE_DISK_TYPE": 4, + "ET_GCE_DISK_SIZE": 5, + "ET_GCE_MACHINE_TYPE": 6, + "ET_GCE_NETWORK": 7, + "ET_GCE_ZONE": 8, + "ET_GCE_SUBNETWORK": 9, + "ET_GCE_REGION": 10, + "ET_GCE_GPU_TYPE": 11, + "ET_GCE_GPU_COUNT": 12, + "ET_GCE_EXTERNAL_IP": 13, + "ET_GCE_IP_FORWARDING": 14, + "ET_GCE_FIREWALL": 15, + "ET_GCE_FIREWALL_RANGE": 16, + "ET_GCE_GENERIC_RESOURCE": 17, + "ET_GCE_LOCATION": 22, + "ET_GCS_BUCKET": 18, + "ET_IAM_SERVICE_ACCOUNT": 19, + "ET_GKE_CLUSTER": 20, + } +) + +func (x ExtensionType) Enum() *ExtensionType { + p := new(ExtensionType) + *p = x + return p +} + +func (x ExtensionType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExtensionType) Descriptor() protoreflect.EnumDescriptor { + return file_bpmetadata_ui_ext_proto_enumTypes[0].Descriptor() +} + +func (ExtensionType) Type() protoreflect.EnumType { + return &file_bpmetadata_ui_ext_proto_enumTypes[0] +} + +func (x ExtensionType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ExtensionType.Descriptor instead. 
+func (ExtensionType) EnumDescriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{0} +} + +// ExternalIPType specifies the type of external IP address. +type ExternalIPType int32 + +const ( + ExternalIPType_IP_UNSPECIFIED ExternalIPType = 0 + // EPHEMERAL indicates that the external IP address is ephemeral. + ExternalIPType_IP_EPHEMERAL ExternalIPType = 1 + // STATIC indicates that the external IP address is static. + ExternalIPType_IP_STATIC ExternalIPType = 2 + // NONE indicates that an external IP is not assigned. + ExternalIPType_IP_NONE ExternalIPType = 3 +) + +// Enum value maps for ExternalIPType. +var ( + ExternalIPType_name = map[int32]string{ + 0: "IP_UNSPECIFIED", + 1: "IP_EPHEMERAL", + 2: "IP_STATIC", + 3: "IP_NONE", + } + ExternalIPType_value = map[string]int32{ + "IP_UNSPECIFIED": 0, + "IP_EPHEMERAL": 1, + "IP_STATIC": 2, + "IP_NONE": 3, + } +) + +func (x ExternalIPType) Enum() *ExternalIPType { + p := new(ExternalIPType) + *p = x + return p +} + +func (x ExternalIPType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExternalIPType) Descriptor() protoreflect.EnumDescriptor { + return file_bpmetadata_ui_ext_proto_enumTypes[1].Descriptor() +} + +func (ExternalIPType) Type() protoreflect.EnumType { + return &file_bpmetadata_ui_ext_proto_enumTypes[1] +} + +func (x ExternalIPType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ExternalIPType.Descriptor instead. +func (ExternalIPType) EnumDescriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{1} +} + +// An extension for variables defined as part of DisplayVariable. The +// extension defines Google-specifc metadata necessary for choosing an +// appropriate input widget or adding restrictions to GCP-specific resources. 
+type GooglePropertyExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type specifies the type of extension. + // Gen: manually-authored + Type ExtensionType `protobuf:"varint,1,opt,name=type,proto3,enum=google.cloud.config.bpmetadata.ExtensionType" json:"type" yaml:"type"` // @gotags: json:"type" yaml:"type" + // Some properties (e.g. GCE_MACHINE_TYPE) require a zone context in order to + // determine the set of allowable values. This field references another + // property from the schema, which must have type GCE_ZONE. + // Gen: manually-authored + ZoneProperty string `protobuf:"bytes,2,opt,name=zone_property,json=zoneProperty,proto3" json:"zoneProperty,omitempty" yaml:"zoneProperty,omitempty"` // @gotags: json:"zoneProperty,omitempty" yaml:"zoneProperty,omitempty" + // Property-specific extensions. + // Gen: manually-authored (all property extensions and their child properties) + GceMachineType *GCEMachineTypeExtension `protobuf:"bytes,3,opt,name=gce_machine_type,json=gceMachineType,proto3" json:"gceMachineType,omitempty" yaml:"gceMachineType,omitempty"` // @gotags: json:"gceMachineType,omitempty" yaml:"gceMachineType,omitempty" + GceDiskSize *GCEDiskSizeExtension `protobuf:"bytes,4,opt,name=gce_disk_size,json=gceDiskSize,proto3" json:"gceDiskSize,omitempty" yaml:"gceDiskSize,omitempty"` // @gotags: json:"gceDiskSize,omitempty" yaml:"gceDiskSize,omitempty" + GceSubnetwork *GCESubnetworkExtension `protobuf:"bytes,5,opt,name=gce_subnetwork,json=gceSubnetwork,proto3" json:"gceSubnetwork,omitempty" yaml:"gceSubnetwork,omitempty"` // @gotags: json:"gceSubnetwork,omitempty" yaml:"gceSubnetwork,omitempty" + GceResource *GCEGenericResourceExtension `protobuf:"bytes,6,opt,name=gce_resource,json=gceResource,proto3" json:"gceResource,omitempty" yaml:"gceResource,omitempty"` // @gotags: json:"gceResource,omitempty" yaml:"gceResource,omitempty" + GceGpuType *GCEGPUTypeExtension 
`protobuf:"bytes,7,opt,name=gce_gpu_type,json=gceGpuType,proto3" json:"gceGpuType,omitempty" yaml:"gceGpuType,omitempty"` // @gotags: json:"gceGpuType,omitempty" yaml:"gceGpuType,omitempty" + GceGpuCount *GCEGPUCountExtension `protobuf:"bytes,8,opt,name=gce_gpu_count,json=gceGpuCount,proto3" json:"gceGpuCount,omitempty" yaml:"gceGpuCount,omitempty"` // @gotags: json:"gceGpuCount,omitempty" yaml:"gceGpuCount,omitempty" + GceNetwork *GCENetworkExtension `protobuf:"bytes,9,opt,name=gce_network,json=gceNetwork,proto3" json:"gceNetwork,omitempty" yaml:"gceNetwork,omitempty"` // @gotags: json:"gceNetwork,omitempty" yaml:"gceNetwork,omitempty" + GceExternalIp *GCEExternalIPExtension `protobuf:"bytes,10,opt,name=gce_external_ip,json=gceExternalIp,proto3" json:"gceExternalIp,omitempty" yaml:"gceExternalIp,omitempty"` // @gotags: json:"gceExternalIp,omitempty" yaml:"gceExternalIp,omitempty" + GceIpForwarding *GCEIPForwardingExtension `protobuf:"bytes,11,opt,name=gce_ip_forwarding,json=gceIpForwarding,proto3" json:"gceIpForwarding,omitempty" yaml:"gceIpForwarding,omitempty"` // @gotags: json:"gceIpForwarding,omitempty" yaml:"gceIpForwarding,omitempty" + GceFirewall *GCEFirewallExtension `protobuf:"bytes,12,opt,name=gce_firewall,json=gceFirewall,proto3" json:"gceFirewall,omitempty" yaml:"gceFirewall,omitempty"` // @gotags: json:"gceFirewall,omitempty" yaml:"gceFirewall,omitempty" + GceFirewallRange *GCEFirewallRangeExtension `protobuf:"bytes,13,opt,name=gce_firewall_range,json=gceFirewallRange,proto3" json:"gceFirewallRange,omitempty" yaml:"gceFirewallRange,omitempty"` // @gotags: json:"gceFirewallRange,omitempty" yaml:"gceFirewallRange,omitempty" + GceZone *GCELocationExtension `protobuf:"bytes,14,opt,name=gce_zone,json=gceZone,proto3" json:"gceZone,omitempty" yaml:"gceZone,omitempty"` // @gotags: json:"gceZone,omitempty" yaml:"gceZone,omitempty" + GceRegion *GCELocationExtension `protobuf:"bytes,15,opt,name=gce_region,json=gceRegion,proto3" json:"gceRegion,omitempty" 
yaml:"gceRegion,omitempty"` // @gotags: json:"gceRegion,omitempty" yaml:"gceRegion,omitempty" + IamServiceAccount *IAMServiceAccountExtension `protobuf:"bytes,16,opt,name=iam_service_account,json=iamServiceAccount,proto3" json:"iamServiceAccount,omitempty" yaml:"iamServiceAccount,omitempty"` // @gotags: json:"iamServiceAccount,omitempty" yaml:"iamServiceAccount,omitempty" + GceDiskType *GCEDiskTypeExtension `protobuf:"bytes,17,opt,name=gce_disk_type,json=gceDiskType,proto3" json:"gceDiskType,omitempty" yaml:"gceDiskType,omitempty"` // @gotags: json:"gceDiskType,omitempty" yaml:"gceDiskType,omitempty" + GceLocation *GCELocationExtension `protobuf:"bytes,18,opt,name=gce_location,json=gceLocation,proto3" json:"gceLocation,omitempty" yaml:"gceLocation,omitempty"` // @gotags: json:"gceLocation,omitempty" yaml:"gceLocation,omitempty" + GkeCluster *GKEClusterExtension `protobuf:"bytes,19,opt,name=gke_cluster,json=gkeCluster,proto3" json:"gkeCluster,omitempty" yaml:"gkeCluster,omitempty"` // @gotags: json:"gkeCluster,omitempty" yaml:"gkeCluster,omitempty" +} + +func (x *GooglePropertyExtension) Reset() { + *x = GooglePropertyExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GooglePropertyExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GooglePropertyExtension) ProtoMessage() {} + +func (x *GooglePropertyExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GooglePropertyExtension.ProtoReflect.Descriptor instead. 
+func (*GooglePropertyExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{0} +} + +func (x *GooglePropertyExtension) GetType() ExtensionType { + if x != nil { + return x.Type + } + return ExtensionType_ET_UNDEFINED +} + +func (x *GooglePropertyExtension) GetZoneProperty() string { + if x != nil { + return x.ZoneProperty + } + return "" +} + +func (x *GooglePropertyExtension) GetGceMachineType() *GCEMachineTypeExtension { + if x != nil { + return x.GceMachineType + } + return nil +} + +func (x *GooglePropertyExtension) GetGceDiskSize() *GCEDiskSizeExtension { + if x != nil { + return x.GceDiskSize + } + return nil +} + +func (x *GooglePropertyExtension) GetGceSubnetwork() *GCESubnetworkExtension { + if x != nil { + return x.GceSubnetwork + } + return nil +} + +func (x *GooglePropertyExtension) GetGceResource() *GCEGenericResourceExtension { + if x != nil { + return x.GceResource + } + return nil +} + +func (x *GooglePropertyExtension) GetGceGpuType() *GCEGPUTypeExtension { + if x != nil { + return x.GceGpuType + } + return nil +} + +func (x *GooglePropertyExtension) GetGceGpuCount() *GCEGPUCountExtension { + if x != nil { + return x.GceGpuCount + } + return nil +} + +func (x *GooglePropertyExtension) GetGceNetwork() *GCENetworkExtension { + if x != nil { + return x.GceNetwork + } + return nil +} + +func (x *GooglePropertyExtension) GetGceExternalIp() *GCEExternalIPExtension { + if x != nil { + return x.GceExternalIp + } + return nil +} + +func (x *GooglePropertyExtension) GetGceIpForwarding() *GCEIPForwardingExtension { + if x != nil { + return x.GceIpForwarding + } + return nil +} + +func (x *GooglePropertyExtension) GetGceFirewall() *GCEFirewallExtension { + if x != nil { + return x.GceFirewall + } + return nil +} + +func (x *GooglePropertyExtension) GetGceFirewallRange() *GCEFirewallRangeExtension { + if x != nil { + return x.GceFirewallRange + } + return nil +} + +func (x *GooglePropertyExtension) GetGceZone() 
*GCELocationExtension { + if x != nil { + return x.GceZone + } + return nil +} + +func (x *GooglePropertyExtension) GetGceRegion() *GCELocationExtension { + if x != nil { + return x.GceRegion + } + return nil +} + +func (x *GooglePropertyExtension) GetIamServiceAccount() *IAMServiceAccountExtension { + if x != nil { + return x.IamServiceAccount + } + return nil +} + +func (x *GooglePropertyExtension) GetGceDiskType() *GCEDiskTypeExtension { + if x != nil { + return x.GceDiskType + } + return nil +} + +func (x *GooglePropertyExtension) GetGceLocation() *GCELocationExtension { + if x != nil { + return x.GceLocation + } + return nil +} + +func (x *GooglePropertyExtension) GetGkeCluster() *GKEClusterExtension { + if x != nil { + return x.GkeCluster + } + return nil +} + +// GCELocationExtension specifies a location extension for a Google Compute Engine (GCE) resource. +type GCELocationExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // AllowlistedZones is a list of zones that are allowed for the resource. + AllowlistedZones []string `protobuf:"bytes,1,rep,name=allowlisted_zones,json=allowlistedZones,proto3" json:"allowlistedZones,omitempty" yaml:"allowlistedZones,omitempty"` // @gotags: json:"allowlistedZones,omitempty" yaml:"allowlistedZones,omitempty" + // AllowlistedRegions is a list of regions that are allowed for the resource. 
+ AllowlistedRegions []string `protobuf:"bytes,2,rep,name=allowlisted_regions,json=allowlistedRegions,proto3" json:"allowlistedRegions,omitempty" yaml:"allowlistedRegions,omitempty"` // @gotags: json:"allowlistedRegions,omitempty" yaml:"allowlistedRegions,omitempty" +} + +func (x *GCELocationExtension) Reset() { + *x = GCELocationExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCELocationExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCELocationExtension) ProtoMessage() {} + +func (x *GCELocationExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCELocationExtension.ProtoReflect.Descriptor instead. +func (*GCELocationExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{1} +} + +func (x *GCELocationExtension) GetAllowlistedZones() []string { + if x != nil { + return x.AllowlistedZones + } + return nil +} + +func (x *GCELocationExtension) GetAllowlistedRegions() []string { + if x != nil { + return x.AllowlistedRegions + } + return nil +} + +// GCEMachineTypeExtension specifies a machine type extension for a GCE resource. +type GCEMachineTypeExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Minimum cpu. Used to filter the list of selectable machine types. + MinCpu int32 `protobuf:"varint,1,opt,name=min_cpu,json=minCpu,proto3" json:"minCpu,omitempty" yaml:"minCpu,omitempty"` // @gotags: json:"minCpu,omitempty" yaml:"minCpu,omitempty" + // Minimum ram. 
Used to filter the list of selectable machine types. + MinRamGb float32 `protobuf:"fixed32,2,opt,name=min_ram_gb,json=minRamGb,proto3" json:"minRamGb,omitempty" yaml:"minRamGb,omitempty"` // @gotags: json:"minRamGb,omitempty" yaml:"minRamGb,omitempty" + // If true, custom machine types will not be selectable. + // More info: + // https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type + DisallowCustomMachineTypes bool `protobuf:"varint,3,opt,name=disallow_custom_machine_types,json=disallowCustomMachineTypes,proto3" json:"disallowCustomMachineTypes,omitempty" yaml:"disallowCustomMachineTypes,omitempty"` // @gotags: json:"disallowCustomMachineTypes,omitempty" yaml:"disallowCustomMachineTypes,omitempty" + // Disk Image allows us to reference the image that is being used + // to help provide/gather data such as the image architecture. + DiskImageProperty string `protobuf:"bytes,4,opt,name=disk_image_property,json=diskImageProperty,proto3" json:"diskImageProperty,omitempty" yaml:"diskImageProperty,omitempty"` // @gotags: json:"diskImageProperty,omitempty" yaml:"diskImageProperty,omitempty" +} + +func (x *GCEMachineTypeExtension) Reset() { + *x = GCEMachineTypeExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCEMachineTypeExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCEMachineTypeExtension) ProtoMessage() {} + +func (x *GCEMachineTypeExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCEMachineTypeExtension.ProtoReflect.Descriptor instead. 
+func (*GCEMachineTypeExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{2} +} + +func (x *GCEMachineTypeExtension) GetMinCpu() int32 { + if x != nil { + return x.MinCpu + } + return 0 +} + +func (x *GCEMachineTypeExtension) GetMinRamGb() float32 { + if x != nil { + return x.MinRamGb + } + return 0 +} + +func (x *GCEMachineTypeExtension) GetDisallowCustomMachineTypes() bool { + if x != nil { + return x.DisallowCustomMachineTypes + } + return false +} + +func (x *GCEMachineTypeExtension) GetDiskImageProperty() string { + if x != nil { + return x.DiskImageProperty + } + return "" +} + +// GCEGPUTypeExtension specifies a GPU type extension for a GCE resource. +type GCEGPUTypeExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // MachineType is the name of the machine type that the GPU is attached to. + MachineType string `protobuf:"bytes,1,opt,name=machine_type,json=machineType,proto3" json:"machineType" yaml:"machineType"` // @gotags: json:"machineType" yaml:"machineType" + // GPUType is the type(s) of GPU that is attached to the machine. 
+ GpuType []string `protobuf:"bytes,2,rep,name=gpu_type,json=gpuType,proto3" json:"gpuType,omitempty" yaml:"gpuType,omitempty"` // @gotags: json:"gpuType,omitempty" yaml:"gpuType,omitempty" +} + +func (x *GCEGPUTypeExtension) Reset() { + *x = GCEGPUTypeExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCEGPUTypeExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCEGPUTypeExtension) ProtoMessage() {} + +func (x *GCEGPUTypeExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCEGPUTypeExtension.ProtoReflect.Descriptor instead. +func (*GCEGPUTypeExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{3} +} + +func (x *GCEGPUTypeExtension) GetMachineType() string { + if x != nil { + return x.MachineType + } + return "" +} + +func (x *GCEGPUTypeExtension) GetGpuType() []string { + if x != nil { + return x.GpuType + } + return nil +} + +// GCEGPUCountExtension specifies the number of GPUs that should be attached to a machine. +type GCEGPUCountExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field references another variable from the schema, + // which must have type GCEMachineType. 
+ MachineTypeVariable string `protobuf:"bytes,1,opt,name=machine_type_variable,json=machineTypeVariable,proto3" json:"machineTypeVariable" yaml:"machineTypeVariable"` // @gotags: json:"machineTypeVariable" yaml:"machineTypeVariable" +} + +func (x *GCEGPUCountExtension) Reset() { + *x = GCEGPUCountExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCEGPUCountExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCEGPUCountExtension) ProtoMessage() {} + +func (x *GCEGPUCountExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCEGPUCountExtension.ProtoReflect.Descriptor instead. +func (*GCEGPUCountExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{4} +} + +func (x *GCEGPUCountExtension) GetMachineTypeVariable() string { + if x != nil { + return x.MachineTypeVariable + } + return "" +} + +// GCEDiskTypeExtension specifies the type of disk for a GCE resource. +type GCEDiskTypeExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field references another variable from the schema, + // which must have type GCEMachineType. 
+ MachineTypeVariable string `protobuf:"bytes,1,opt,name=machine_type_variable,json=machineTypeVariable,proto3" json:"machineTypeVariable" yaml:"machineTypeVariable"` // @gotags: json:"machineTypeVariable" yaml:"machineTypeVariable" +} + +func (x *GCEDiskTypeExtension) Reset() { + *x = GCEDiskTypeExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCEDiskTypeExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCEDiskTypeExtension) ProtoMessage() {} + +func (x *GCEDiskTypeExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCEDiskTypeExtension.ProtoReflect.Descriptor instead. +func (*GCEDiskTypeExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{5} +} + +func (x *GCEDiskTypeExtension) GetMachineTypeVariable() string { + if x != nil { + return x.MachineTypeVariable + } + return "" +} + +// GCEDiskSizeExtension specifies the size of a disk for a GCE resource. +type GCEDiskSizeExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The allowable range of disk sizes depends on the disk type. This field + // references another variable from the schema, which must have type GCEDiskType. 
+ DiskTypeVariable string `protobuf:"bytes,1,opt,name=disk_type_variable,json=diskTypeVariable,proto3" json:"diskTypeVariable" yaml:"diskTypeVariable"` // @gotags: json:"diskTypeVariable" yaml:"diskTypeVariable" +} + +func (x *GCEDiskSizeExtension) Reset() { + *x = GCEDiskSizeExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCEDiskSizeExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCEDiskSizeExtension) ProtoMessage() {} + +func (x *GCEDiskSizeExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCEDiskSizeExtension.ProtoReflect.Descriptor instead. +func (*GCEDiskSizeExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{6} +} + +func (x *GCEDiskSizeExtension) GetDiskTypeVariable() string { + if x != nil { + return x.DiskTypeVariable + } + return "" +} + +// GCENetworkExtension specifies a network extension for a GCE resource. +type GCENetworkExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // AllowSharedVpcs indicates this solution can receive + // shared VPC selflinks (fully qualified compute links). + AllowSharedVpcs bool `protobuf:"varint,1,opt,name=allow_shared_vpcs,json=allowSharedVpcs,proto3" json:"allowSharedVpcs,omitempty" yaml:"allowSharedVpcs,omitempty"` // @gotags: json:"allowSharedVpcs,omitempty" yaml:"allowSharedVpcs,omitempty" + // Used to indicate to which machine type this network interface will be + // attached to. 
+ MachineTypeVariable string `protobuf:"bytes,2,opt,name=machine_type_variable,json=machineTypeVariable,proto3" json:"machineTypeVariable" yaml:"machineTypeVariable"` // @gotags: json:"machineTypeVariable" yaml:"machineTypeVariable" + // Label that will be in front of each Network Interface. + Labels []string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" yaml:"labels,omitempty"` // @gotags: json:"labels,omitempty" yaml:"labels,omitempty" +} + +func (x *GCENetworkExtension) Reset() { + *x = GCENetworkExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCENetworkExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCENetworkExtension) ProtoMessage() {} + +func (x *GCENetworkExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCENetworkExtension.ProtoReflect.Descriptor instead. +func (*GCENetworkExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{7} +} + +func (x *GCENetworkExtension) GetAllowSharedVpcs() bool { + if x != nil { + return x.AllowSharedVpcs + } + return false +} + +func (x *GCENetworkExtension) GetMachineTypeVariable() string { + if x != nil { + return x.MachineTypeVariable + } + return "" +} + +func (x *GCENetworkExtension) GetLabels() []string { + if x != nil { + return x.Labels + } + return nil +} + +type GCEExternalIPExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // NetworkVariable is the name of the network variable that the external IP address belongs to. 
+ NetworkVariable string `protobuf:"bytes,1,opt,name=network_variable,json=networkVariable,proto3" json:"networkVariable" yaml:"networkVariable"` // @gotags: json:"networkVariable" yaml:"networkVariable" + // Type specifies the type of external IP address. Defaults to EPHEMERAL if not specified. + Type ExternalIPType `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.config.bpmetadata.ExternalIPType" json:"type,omitempty" yaml:"type,omitempty"` // @gotags: json:"type,omitempty" yaml:"type,omitempty" + // Flag to denote if an external IP should be configurable. + NotConfigurable bool `protobuf:"varint,3,opt,name=not_configurable,json=notConfigurable,proto3" json:"notConfigurable,omitempty" yaml:"notConfigurable,omitempty"` // @gotags: json:"notConfigurable,omitempty" yaml:"notConfigurable,omitempty" + // Flag to denote if static IPs are allowed for the external IP. + AllowStaticIps bool `protobuf:"varint,4,opt,name=allow_static_ips,json=allowStaticIps,proto3" json:"allowStaticIps,omitempty" yaml:"allowStaticIps,omitempty"` // @gotags: json:"allowStaticIps,omitempty" yaml:"allowStaticIps,omitempty" +} + +func (x *GCEExternalIPExtension) Reset() { + *x = GCEExternalIPExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCEExternalIPExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCEExternalIPExtension) ProtoMessage() {} + +func (x *GCEExternalIPExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCEExternalIPExtension.ProtoReflect.Descriptor instead. 
+func (*GCEExternalIPExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{8} +} + +func (x *GCEExternalIPExtension) GetNetworkVariable() string { + if x != nil { + return x.NetworkVariable + } + return "" +} + +func (x *GCEExternalIPExtension) GetType() ExternalIPType { + if x != nil { + return x.Type + } + return ExternalIPType_IP_UNSPECIFIED +} + +func (x *GCEExternalIPExtension) GetNotConfigurable() bool { + if x != nil { + return x.NotConfigurable + } + return false +} + +func (x *GCEExternalIPExtension) GetAllowStaticIps() bool { + if x != nil { + return x.AllowStaticIps + } + return false +} + +// GCEIPForwardingExtension specifies an IP forwarding extension for a GCE resource. +type GCEIPForwardingExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // NetworkVariable is the name of the network variable that the IP forwarding belongs to. + NetworkVariable string `protobuf:"bytes,1,opt,name=network_variable,json=networkVariable,proto3" json:"networkVariable" yaml:"networkVariable"` // @gotags: json:"networkVariable" yaml:"networkVariable" + // NotConfigurable specifies whether the IP forwarding is configurable. Defaults to false if not specified. 
+ NotConfigurable bool `protobuf:"varint,2,opt,name=not_configurable,json=notConfigurable,proto3" json:"notConfigurable,omitempty" yaml:"notConfigurable,omitempty"` // @gotags: json:"notConfigurable,omitempty" yaml:"notConfigurable,omitempty" +} + +func (x *GCEIPForwardingExtension) Reset() { + *x = GCEIPForwardingExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCEIPForwardingExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCEIPForwardingExtension) ProtoMessage() {} + +func (x *GCEIPForwardingExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCEIPForwardingExtension.ProtoReflect.Descriptor instead. +func (*GCEIPForwardingExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{9} +} + +func (x *GCEIPForwardingExtension) GetNetworkVariable() string { + if x != nil { + return x.NetworkVariable + } + return "" +} + +func (x *GCEIPForwardingExtension) GetNotConfigurable() bool { + if x != nil { + return x.NotConfigurable + } + return false +} + +type GCEFirewallExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // NetworkVariable is used to indicate the network variable in the schema + // this external IP belongs to. 
+ NetworkVariable string `protobuf:"bytes,1,opt,name=network_variable,json=networkVariable,proto3" json:"networkVariable" yaml:"networkVariable"` // @gotags: json:"networkVariable" yaml:"networkVariable" +} + +func (x *GCEFirewallExtension) Reset() { + *x = GCEFirewallExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCEFirewallExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCEFirewallExtension) ProtoMessage() {} + +func (x *GCEFirewallExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCEFirewallExtension.ProtoReflect.Descriptor instead. +func (*GCEFirewallExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{10} +} + +func (x *GCEFirewallExtension) GetNetworkVariable() string { + if x != nil { + return x.NetworkVariable + } + return "" +} + +type GCEFirewallRangeExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // FirewallVariable is used to indicate the firewall variable with the type + // GCEFirewall in the schema to which this firewall range belongs to. 
+ FirewallVariable string `protobuf:"bytes,1,opt,name=firewall_variable,json=firewallVariable,proto3" json:"firewallVariable" yaml:"firewallVariable"` // @gotags: json:"firewallVariable" yaml:"firewallVariable" +} + +func (x *GCEFirewallRangeExtension) Reset() { + *x = GCEFirewallRangeExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCEFirewallRangeExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCEFirewallRangeExtension) ProtoMessage() {} + +func (x *GCEFirewallRangeExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCEFirewallRangeExtension.ProtoReflect.Descriptor instead. +func (*GCEFirewallRangeExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{11} +} + +func (x *GCEFirewallRangeExtension) GetFirewallVariable() string { + if x != nil { + return x.FirewallVariable + } + return "" +} + +type GCESubnetworkExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Subnetwork variable requires a network context in order to determine the + // set of available subnetworks. This field references another + // variable from the schema, which must have type GCENetwork. 
+ NetworkVariable string `protobuf:"bytes,1,opt,name=network_variable,json=networkVariable,proto3" json:"networkVariable" yaml:"networkVariable"` // @gotags: json:"networkVariable" yaml:"networkVariable" +} + +func (x *GCESubnetworkExtension) Reset() { + *x = GCESubnetworkExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCESubnetworkExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCESubnetworkExtension) ProtoMessage() {} + +func (x *GCESubnetworkExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCESubnetworkExtension.ProtoReflect.Descriptor instead. +func (*GCESubnetworkExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{12} +} + +func (x *GCESubnetworkExtension) GetNetworkVariable() string { + if x != nil { + return x.NetworkVariable + } + return "" +} + +type GCEGenericResourceExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // GCE resource type to be fetched. This field references another + // property from the schema, which must have type GCEGenericResource. 
+ ResourceVariable string `protobuf:"bytes,1,opt,name=resource_variable,json=resourceVariable,proto3" json:"resourceVariable" yaml:"resourceVariable"` // @gotags: json:"resourceVariable" yaml:"resourceVariable" +} + +func (x *GCEGenericResourceExtension) Reset() { + *x = GCEGenericResourceExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GCEGenericResourceExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCEGenericResourceExtension) ProtoMessage() {} + +func (x *GCEGenericResourceExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCEGenericResourceExtension.ProtoReflect.Descriptor instead. +func (*GCEGenericResourceExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{13} +} + +func (x *GCEGenericResourceExtension) GetResourceVariable() string { + if x != nil { + return x.ResourceVariable + } + return "" +} + +type IAMServiceAccountExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of IAM roles that to grant to a new SA, or the roles to filter + // existing SAs with. 
+ Roles []string `protobuf:"bytes,1,rep,name=roles,proto3" json:"roles" yaml:"roles"` // @gotags: json:"roles" yaml:"roles" +} + +func (x *IAMServiceAccountExtension) Reset() { + *x = IAMServiceAccountExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IAMServiceAccountExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IAMServiceAccountExtension) ProtoMessage() {} + +func (x *IAMServiceAccountExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IAMServiceAccountExtension.ProtoReflect.Descriptor instead. +func (*IAMServiceAccountExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{14} +} + +func (x *IAMServiceAccountExtension) GetRoles() []string { + if x != nil { + return x.Roles + } + return nil +} + +type GKEClusterExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // GKE Cluster variable to be used for gathering context needed to select/create + // a GKE Cluster for GKE AI Products. + LocationVariable string `protobuf:"bytes,1,opt,name=location_variable,json=locationVariable,proto3" json:"locationVariable" yaml:"locationVariable"` // @gotags: json:"locationVariable" yaml:"locationVariable" + // Variable that will indicate if we are creating a cluster or using an existing one. 
+ ClusterCreationVariable string `protobuf:"bytes,2,opt,name=cluster_creation_variable,json=clusterCreationVariable,proto3" json:"clusterCreationVariable" yaml:"clusterCreationVariable"` // @gotags: json:"clusterCreationVariable" yaml:"clusterCreationVariable" +} + +func (x *GKEClusterExtension) Reset() { + *x = GKEClusterExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GKEClusterExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GKEClusterExtension) ProtoMessage() {} + +func (x *GKEClusterExtension) ProtoReflect() protoreflect.Message { + mi := &file_bpmetadata_ui_ext_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GKEClusterExtension.ProtoReflect.Descriptor instead. 
+func (*GKEClusterExtension) Descriptor() ([]byte, []int) { + return file_bpmetadata_ui_ext_proto_rawDescGZIP(), []int{15} +} + +func (x *GKEClusterExtension) GetLocationVariable() string { + if x != nil { + return x.LocationVariable + } + return "" +} + +func (x *GKEClusterExtension) GetClusterCreationVariable() string { + if x != nil { + return x.ClusterCreationVariable + } + return "" +} + +var File_bpmetadata_ui_ext_proto protoreflect.FileDescriptor + +var file_bpmetadata_ui_ext_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x75, 0x69, 0x5f, + 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xa7, 0x0d, 0x0a, 0x17, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x7a, 0x6f, 0x6e, 0x65, + 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x7a, 0x6f, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x61, 0x0a, + 0x10, 0x67, 0x63, 0x65, 0x5f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, + 0x6d, 0x65, 
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x43, 0x45, 0x4d, 0x61, 0x63, 0x68, + 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0e, 0x67, 0x63, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x58, 0x0a, 0x0d, 0x67, 0x63, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x43, 0x45, 0x44, 0x69, 0x73, 0x6b, + 0x53, 0x69, 0x7a, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x67, + 0x63, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x5d, 0x0a, 0x0e, 0x67, 0x63, + 0x65, 0x5f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x43, 0x45, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x67, 0x63, 0x65, 0x53, + 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x5e, 0x0a, 0x0c, 0x67, 0x63, 0x65, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x43, 0x45, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x67, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x12, 0x55, 0x0a, 0x0c, 0x67, 0x63, 0x65, + 0x5f, 0x67, 0x70, 0x75, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x43, 0x45, 0x47, 0x50, 0x55, 0x54, 0x79, 0x70, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x67, 0x63, 0x65, 0x47, 0x70, 0x75, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x58, 0x0a, 0x0d, 0x67, 0x63, 0x65, 0x5f, 0x67, 0x70, 0x75, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x43, 0x45, 0x47, 0x50, 0x55, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x67, + 0x63, 0x65, 0x47, 0x70, 0x75, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x54, 0x0a, 0x0b, 0x67, 0x63, + 0x65, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x43, 0x45, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x67, 0x63, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x12, 0x5e, 0x0a, 0x0f, 0x67, 0x63, 0x65, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x43, 0x45, 
0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x50, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x0d, 0x67, 0x63, 0x65, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x70, + 0x12, 0x64, 0x0a, 0x11, 0x67, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x43, 0x45, + 0x49, 0x50, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x67, 0x63, 0x65, 0x49, 0x70, 0x46, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x57, 0x0a, 0x0c, 0x67, 0x63, 0x65, 0x5f, 0x66, 0x69, + 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x43, + 0x45, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0b, 0x67, 0x63, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x12, + 0x67, 0x0a, 0x12, 0x67, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x5f, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x43, 0x45, + 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x67, 0x63, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, + 0x61, 0x6c, 0x6c, 0x52, 
0x61, 0x6e, 0x67, 0x65, 0x12, 0x4f, 0x0a, 0x08, 0x67, 0x63, 0x65, 0x5f, + 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x43, 0x45, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x67, 0x63, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x53, 0x0a, 0x0a, 0x67, 0x63, 0x65, + 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x43, 0x45, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x67, 0x63, 0x65, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x6a, + 0x0a, 0x13, 0x69, 0x61, 0x6d, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x41, 0x4d, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x69, 0x61, 0x6d, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x58, 0x0a, 0x0d, 0x67, 0x63, + 0x65, 0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 
0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x43, 0x45, 0x44, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x67, 0x63, 0x65, 0x44, 0x69, 0x73, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x57, 0x0a, 0x0c, 0x67, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x43, 0x45, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0b, 0x67, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, + 0x0b, 0x67, 0x6b, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x4b, 0x45, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x67, 0x6b, 0x65, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x22, 0x74, 0x0a, 0x14, 0x47, 0x43, 0x45, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x64, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x6c, 0x69, 0x73, 0x74, + 
0x65, 0x64, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc3, 0x01, 0x0a, 0x17, 0x47, 0x43, + 0x45, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x70, 0x75, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6d, 0x69, 0x6e, 0x43, 0x70, 0x75, 0x12, 0x1c, + 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6d, 0x5f, 0x67, 0x62, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x02, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x52, 0x61, 0x6d, 0x47, 0x62, 0x12, 0x41, 0x0a, 0x1d, + 0x64, 0x69, 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x2e, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x69, + 0x73, 0x6b, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x22, + 0x53, 0x0a, 0x13, 0x47, 0x43, 0x45, 0x47, 0x50, 0x55, 0x54, 0x79, 0x70, 0x65, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x61, + 0x63, 0x68, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x70, 0x75, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x67, 0x70, 0x75, + 0x54, 0x79, 0x70, 0x65, 0x22, 0x4a, 0x0a, 0x14, 0x47, 0x43, 0x45, 0x47, 0x50, 0x55, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, + 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 
0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x72, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x63, + 0x68, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, + 0x22, 0x4a, 0x0a, 0x14, 0x47, 0x43, 0x45, 0x44, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x63, 0x68, + 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x44, 0x0a, 0x14, + 0x47, 0x43, 0x45, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x47, 0x43, 0x45, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x76, 0x70, 0x63, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x53, 0x68, 0x61, 0x72, + 0x65, 0x64, 0x56, 0x70, 0x63, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 
0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x22, 0xdc, 0x01, 0x0a, 0x16, 0x47, 0x43, 0x45, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x49, 0x50, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, + 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x70, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x49, 0x50, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x6e, 0x6f, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6e, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x49, 0x70, + 0x73, 0x22, 0x70, 0x0a, 0x18, 0x47, 0x43, 0x45, 0x49, 0x50, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, + 0x64, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, + 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x6e, 0x6f, 0x74, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 
0x52, 0x0f, 0x6e, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x62, 0x6c, 0x65, 0x22, 0x41, 0x0a, 0x14, 0x47, 0x43, 0x45, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, + 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x56, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x48, 0x0a, 0x19, 0x47, 0x43, 0x45, 0x46, 0x69, 0x72, + 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x5f, + 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, + 0x22, 0x43, 0x0a, 0x16, 0x47, 0x43, 0x45, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x56, 0x61, 0x72, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x4a, 0x0a, 0x1b, 0x47, 0x43, 0x45, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x49, 0x41, 0x4d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, + 0x72, 0x6f, 0x6c, 0x65, 0x73, 0x22, 0x7e, 0x0a, 0x13, 0x47, 0x4b, 0x45, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x72, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x2a, 0x96, 0x04, 0x0a, 0x0d, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x54, 0x5f, 0x55, 0x4e, + 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x54, 0x5f, + 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, + 0x18, 0x0a, 0x14, 0x45, 0x54, 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x4c, 0x49, 0x4e, 0x45, + 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x54, 0x5f, + 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, + 0x15, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x44, 0x49, 0x53, 0x4b, + 0x5f, 0x49, 0x4d, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x54, 0x5f, 0x47, + 0x43, 0x45, 0x5f, 0x44, 0x49, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x04, 0x12, 0x14, + 0x0a, 0x10, 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x44, 0x49, 0x53, 0x4b, 0x5f, 
0x53, 0x49, + 0x5a, 0x45, 0x10, 0x05, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x4d, + 0x41, 0x43, 0x48, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x06, 0x12, 0x12, 0x0a, + 0x0e, 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, + 0x07, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x5a, 0x4f, 0x4e, 0x45, + 0x10, 0x08, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x53, 0x55, 0x42, + 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x09, 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x54, 0x5f, + 0x47, 0x43, 0x45, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x4f, 0x4e, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, + 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x47, 0x50, 0x55, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, + 0x0b, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x47, 0x50, 0x55, 0x5f, + 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x0c, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x54, 0x5f, 0x47, 0x43, + 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x49, 0x50, 0x10, 0x0d, 0x12, + 0x18, 0x0a, 0x14, 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x49, 0x50, 0x5f, 0x46, 0x4f, 0x52, + 0x57, 0x41, 0x52, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x0e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x54, 0x5f, + 0x47, 0x43, 0x45, 0x5f, 0x46, 0x49, 0x52, 0x45, 0x57, 0x41, 0x4c, 0x4c, 0x10, 0x0f, 0x12, 0x19, + 0x0a, 0x15, 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, 0x5f, 0x46, 0x49, 0x52, 0x45, 0x57, 0x41, 0x4c, + 0x4c, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x10, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x54, 0x5f, + 0x47, 0x43, 0x45, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x49, 0x43, 0x5f, 0x52, 0x45, 0x53, 0x4f, + 0x55, 0x52, 0x43, 0x45, 0x10, 0x11, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x54, 0x5f, 0x47, 0x43, 0x45, + 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x16, 0x12, 0x11, 0x0a, 0x0d, 0x45, + 0x54, 0x5f, 0x47, 0x43, 0x53, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x12, 0x12, 0x1a, + 0x0a, 0x16, 0x45, 0x54, 
0x5f, 0x49, 0x41, 0x4d, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, + 0x5f, 0x41, 0x43, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x45, 0x54, + 0x5f, 0x47, 0x4b, 0x45, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x10, 0x14, 0x2a, 0x52, + 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x50, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x50, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x5f, 0x45, 0x50, 0x48, 0x45, 0x4d, + 0x45, 0x52, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x50, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x49, 0x43, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x50, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, + 0x10, 0x03, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x66, 0x6f, 0x75, 0x6e, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2d, 0x74, 0x6f, 0x6f, 0x6c, 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x6c, + 0x69, 0x2f, 0x62, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_bpmetadata_ui_ext_proto_rawDescOnce sync.Once + file_bpmetadata_ui_ext_proto_rawDescData = file_bpmetadata_ui_ext_proto_rawDesc +) + +func file_bpmetadata_ui_ext_proto_rawDescGZIP() []byte { + file_bpmetadata_ui_ext_proto_rawDescOnce.Do(func() { + file_bpmetadata_ui_ext_proto_rawDescData = protoimpl.X.CompressGZIP(file_bpmetadata_ui_ext_proto_rawDescData) + }) + return file_bpmetadata_ui_ext_proto_rawDescData +} + +var file_bpmetadata_ui_ext_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_bpmetadata_ui_ext_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_bpmetadata_ui_ext_proto_goTypes = []interface{}{ + (ExtensionType)(0), // 0: google.cloud.config.bpmetadata.ExtensionType + 
(ExternalIPType)(0), // 1: google.cloud.config.bpmetadata.ExternalIPType + (*GooglePropertyExtension)(nil), // 2: google.cloud.config.bpmetadata.GooglePropertyExtension + (*GCELocationExtension)(nil), // 3: google.cloud.config.bpmetadata.GCELocationExtension + (*GCEMachineTypeExtension)(nil), // 4: google.cloud.config.bpmetadata.GCEMachineTypeExtension + (*GCEGPUTypeExtension)(nil), // 5: google.cloud.config.bpmetadata.GCEGPUTypeExtension + (*GCEGPUCountExtension)(nil), // 6: google.cloud.config.bpmetadata.GCEGPUCountExtension + (*GCEDiskTypeExtension)(nil), // 7: google.cloud.config.bpmetadata.GCEDiskTypeExtension + (*GCEDiskSizeExtension)(nil), // 8: google.cloud.config.bpmetadata.GCEDiskSizeExtension + (*GCENetworkExtension)(nil), // 9: google.cloud.config.bpmetadata.GCENetworkExtension + (*GCEExternalIPExtension)(nil), // 10: google.cloud.config.bpmetadata.GCEExternalIPExtension + (*GCEIPForwardingExtension)(nil), // 11: google.cloud.config.bpmetadata.GCEIPForwardingExtension + (*GCEFirewallExtension)(nil), // 12: google.cloud.config.bpmetadata.GCEFirewallExtension + (*GCEFirewallRangeExtension)(nil), // 13: google.cloud.config.bpmetadata.GCEFirewallRangeExtension + (*GCESubnetworkExtension)(nil), // 14: google.cloud.config.bpmetadata.GCESubnetworkExtension + (*GCEGenericResourceExtension)(nil), // 15: google.cloud.config.bpmetadata.GCEGenericResourceExtension + (*IAMServiceAccountExtension)(nil), // 16: google.cloud.config.bpmetadata.IAMServiceAccountExtension + (*GKEClusterExtension)(nil), // 17: google.cloud.config.bpmetadata.GKEClusterExtension +} +var file_bpmetadata_ui_ext_proto_depIdxs = []int32{ + 0, // 0: google.cloud.config.bpmetadata.GooglePropertyExtension.type:type_name -> google.cloud.config.bpmetadata.ExtensionType + 4, // 1: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_machine_type:type_name -> google.cloud.config.bpmetadata.GCEMachineTypeExtension + 8, // 2: 
google.cloud.config.bpmetadata.GooglePropertyExtension.gce_disk_size:type_name -> google.cloud.config.bpmetadata.GCEDiskSizeExtension + 14, // 3: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_subnetwork:type_name -> google.cloud.config.bpmetadata.GCESubnetworkExtension + 15, // 4: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_resource:type_name -> google.cloud.config.bpmetadata.GCEGenericResourceExtension + 5, // 5: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_gpu_type:type_name -> google.cloud.config.bpmetadata.GCEGPUTypeExtension + 6, // 6: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_gpu_count:type_name -> google.cloud.config.bpmetadata.GCEGPUCountExtension + 9, // 7: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_network:type_name -> google.cloud.config.bpmetadata.GCENetworkExtension + 10, // 8: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_external_ip:type_name -> google.cloud.config.bpmetadata.GCEExternalIPExtension + 11, // 9: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_ip_forwarding:type_name -> google.cloud.config.bpmetadata.GCEIPForwardingExtension + 12, // 10: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_firewall:type_name -> google.cloud.config.bpmetadata.GCEFirewallExtension + 13, // 11: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_firewall_range:type_name -> google.cloud.config.bpmetadata.GCEFirewallRangeExtension + 3, // 12: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_zone:type_name -> google.cloud.config.bpmetadata.GCELocationExtension + 3, // 13: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_region:type_name -> google.cloud.config.bpmetadata.GCELocationExtension + 16, // 14: google.cloud.config.bpmetadata.GooglePropertyExtension.iam_service_account:type_name -> google.cloud.config.bpmetadata.IAMServiceAccountExtension + 7, // 15: 
google.cloud.config.bpmetadata.GooglePropertyExtension.gce_disk_type:type_name -> google.cloud.config.bpmetadata.GCEDiskTypeExtension + 3, // 16: google.cloud.config.bpmetadata.GooglePropertyExtension.gce_location:type_name -> google.cloud.config.bpmetadata.GCELocationExtension + 17, // 17: google.cloud.config.bpmetadata.GooglePropertyExtension.gke_cluster:type_name -> google.cloud.config.bpmetadata.GKEClusterExtension + 1, // 18: google.cloud.config.bpmetadata.GCEExternalIPExtension.type:type_name -> google.cloud.config.bpmetadata.ExternalIPType + 19, // [19:19] is the sub-list for method output_type + 19, // [19:19] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name +} + +func init() { file_bpmetadata_ui_ext_proto_init() } +func file_bpmetadata_ui_ext_proto_init() { + if File_bpmetadata_ui_ext_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_bpmetadata_ui_ext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GooglePropertyExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCELocationExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCEMachineTypeExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCEGPUTypeExtension); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCEGPUCountExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCEDiskTypeExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCEDiskSizeExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCENetworkExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCEExternalIPExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCEIPForwardingExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCEFirewallExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_bpmetadata_ui_ext_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCEFirewallRangeExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCESubnetworkExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GCEGenericResourceExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IAMServiceAccountExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bpmetadata_ui_ext_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GKEClusterExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_bpmetadata_ui_ext_proto_rawDesc, + NumEnums: 2, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_bpmetadata_ui_ext_proto_goTypes, + DependencyIndexes: file_bpmetadata_ui_ext_proto_depIdxs, + EnumInfos: file_bpmetadata_ui_ext_proto_enumTypes, + MessageInfos: file_bpmetadata_ui_ext_proto_msgTypes, + }.Build() + File_bpmetadata_ui_ext_proto = out.File + file_bpmetadata_ui_ext_proto_rawDesc = nil + file_bpmetadata_ui_ext_proto_goTypes = nil + 
file_bpmetadata_ui_ext_proto_depIdxs = nil +} diff --git a/cli/bpmetadata/cmd.go b/cli/bpmetadata/cmd.go new file mode 100644 index 00000000000..6375f86b23d --- /dev/null +++ b/cli/bpmetadata/cmd.go @@ -0,0 +1,500 @@ +package bpmetadata + +import ( + "errors" + "fmt" + "os" + "path" + "strings" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/util" + "github.com/itchyny/json2yaml" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "sigs.k8s.io/yaml" +) + +var mdFlags struct { + path string + nested bool + force bool + display bool + validate bool + quiet bool + genOutputType bool +} + +const ( + readmeFileName = "README.md" + tfVersionsFileName = "versions.tf" + tfRolesFileName = "test/setup/iam.tf" + tfServicesFileName = "test/setup/main.tf" + iconFilePath = "assets/icon.png" + modulesPath = "modules/" + examplesPath = "examples" + metadataFileName = "metadata.yaml" + metadataDisplayFileName = "metadata.display.yaml" + metadataApiVersion = "blueprints.cloud.google.com/v1alpha1" + metadataKind = "BlueprintMetadata" + localConfigAnnotation = "config.kubernetes.io/local-config" +) + +func init() { + viper.AutomaticEnv() + + Cmd.Flags().BoolVarP(&mdFlags.display, "display", "d", false, "Generate the display metadata used for UI rendering.") + Cmd.Flags().BoolVarP(&mdFlags.force, "force", "f", false, "Force the generation of fresh metadata.") + Cmd.Flags().StringVarP(&mdFlags.path, "path", "p", ".", "Path to the blueprint for generating metadata.") + Cmd.Flags().BoolVar(&mdFlags.nested, "nested", true, "Flag for generating metadata for nested blueprint, if any.") + Cmd.Flags().BoolVarP(&mdFlags.validate, "validate", "v", false, "Validate metadata against the schema definition.") + Cmd.Flags().BoolVarP(&mdFlags.quiet, "quiet", "q", false, "Run in quiet mode suppressing all prompts.") + Cmd.Flags().BoolVarP(&mdFlags.genOutputType, "generate-output-type", "g", 
false, "Automatically generate type field for outputs.") +} + +var Cmd = &cobra.Command{ + Use: "metadata", + Short: "Generates blueprint metadata", + Long: `Generates metadata.yaml for specified blueprint`, + Args: cobra.NoArgs, + RunE: generate, +} + +var repoDetails repoDetail + +// The top-level command function that generates metadata based on the provided flags +func generate(cmd *cobra.Command, args []string) error { + wdPath, err := os.Getwd() + if err != nil { + return fmt.Errorf("error getting working dir: %w", err) + } + + // validate metadata if there is an argument passed into the command + if mdFlags.validate { + if err := validateMetadata(mdFlags.path, wdPath); err != nil { + return err + } + + return nil + } + + currBpPath := mdFlags.path + if !path.IsAbs(mdFlags.path) { + currBpPath = path.Join(wdPath, mdFlags.path) + } + + var allBpPaths []string + _, err = os.Stat(path.Join(currBpPath, readmeFileName)) + + // throw an error and exit if root level readme.md doesn't exist + if err != nil { + return fmt.Errorf("top-level module does not have a readme: %w", err) + } + + allBpPaths = append(allBpPaths, currBpPath) + var errors []string + + // if nested, check if modules/ exists and create paths + // for submodules + if mdFlags.nested { + modulesPathforBp := path.Join(currBpPath, modulesPath) + _, err = os.Stat(modulesPathforBp) + if os.IsNotExist(err) { + Log.Info("sub-modules do not exist for this blueprint") + } else { + moduleDirs, err := util.WalkTerraformDirs(modulesPathforBp) + if err != nil { + errors = append(errors, err.Error()) + } else { + allBpPaths = append(allBpPaths, moduleDirs...) 
+ } + } + } + + for _, modPath := range allBpPaths { + // check if module path has readme.md + _, err := os.Stat(path.Join(modPath, readmeFileName)) + + // log info if a sub-module doesn't have a readme.md and continue + if err != nil { + Log.Info("skipping metadata for sub-module identified as an internal module", "Path:", modPath) + continue + } + + err = generateMetadataForBpPath(modPath) + if err != nil { + e := fmt.Sprintf("path: %s\n %s", modPath, err.Error()) + errors = append(errors, e) + } + } + + if len(errors) > 0 { + return fmt.Errorf("%s", strings.Join(errors, "\n")) + } + + Log.Info("metadata generated successfully") + return nil +} + +func generateMetadataForBpPath(bpPath string) error { + //try to read existing metadata.yaml + bpObj, err := UnmarshalMetadata(bpPath, metadataFileName) + if err != nil && !errors.Is(err, os.ErrNotExist) && !mdFlags.force { + return err + } + + // create core metadata + bpMetaObj, err := CreateBlueprintMetadata(bpPath, bpObj) + if err != nil { + return fmt.Errorf("error creating metadata for blueprint at path: %s. Details: %w", bpPath, err) + } + + // If the flag is set, update output types + if mdFlags.genOutputType { + err = updateOutputTypes(bpPath, bpMetaObj.Spec.Interfaces) + if err != nil { + return fmt.Errorf("error updating output types: %w", err) + } + } + + // write core metadata to disk + err = WriteMetadata(bpMetaObj, bpPath, metadataFileName) + if err != nil { + return fmt.Errorf("error writing metadata to disk for blueprint at path: %s. 
Details: %w", bpPath, err) + } + + // continue with creating display metadata if the flag is set, + // else let the command exit + if !mdFlags.display { + return nil + } + + bpDpObj, err := UnmarshalMetadata(bpPath, metadataDisplayFileName) + if err != nil && !errors.Is(err, os.ErrNotExist) && !mdFlags.force { + return err + } + + // create display metadata + bpMetaDpObj, err := CreateBlueprintDisplayMetadata(bpPath, bpDpObj, bpMetaObj) + if err != nil { + return fmt.Errorf("error creating display metadata for blueprint at path: %s. Details: %w", bpPath, err) + } + + // write display metadata to disk + err = WriteMetadata(bpMetaDpObj, bpPath, metadataDisplayFileName) + if err != nil { + return fmt.Errorf("error writing display metadata to disk for blueprint at path: %s. Details: %w", bpPath, err) + } + + return nil +} + +func CreateBlueprintMetadata(bpPath string, bpMetadataObj *BlueprintMetadata) (*BlueprintMetadata, error) { + // Verify that readme is present. + readmeContent, err := os.ReadFile(path.Join(bpPath, readmeFileName)) + if err != nil { + return nil, fmt.Errorf("blueprint readme markdown is missing, create one using https://tinyurl.com/tf-mod-readme | error: %w", err) + } + + // verify that the blueprint path is valid & get repo details + getRepoDetailsByPath(bpPath, &repoDetails, readmeContent) + if repoDetails.ModuleName == "" && !mdFlags.quiet { + fmt.Printf("Provide a name for the blueprint at path [%s]: ", bpPath) + _, err := fmt.Scan(&repoDetails.ModuleName) + if err != nil { + fmt.Println("Unable to scan the name for the blueprint.") + } + } + + if repoDetails.Source.URL == "" && !mdFlags.quiet { + fmt.Printf("Provide a URL for the blueprint source at path [%s]: ", bpPath) + _, err := fmt.Scan(&repoDetails.Source.URL) + if err != nil { + fmt.Println("Unable to scan the URL for the blueprint.") + } + } + + // start creating blueprint metadata + bpMetadataObj.ApiVersion = metadataApiVersion + bpMetadataObj.Kind = metadataKind + + if 
bpMetadataObj.Metadata == nil { + bpMetadataObj.Metadata = &ResourceTypeMeta{ + Name: repoDetails.ModuleName, + Annotations: map[string]string{localConfigAnnotation: "true"}, + } + } + + if bpMetadataObj.Spec == nil { + bpMetadataObj.Spec = &BlueprintMetadataSpec{} + } + + if bpMetadataObj.Spec.Info == nil { + bpMetadataObj.Spec.Info = &BlueprintInfo{} + } + + // create blueprint info + err = bpMetadataObj.Spec.Info.create(bpPath, repoDetails, readmeContent) + if err != nil { + return nil, fmt.Errorf("error creating blueprint info: %w", err) + } + + var existingInterfaces *BlueprintInterface + if bpMetadataObj.Spec.Interfaces == nil { + bpMetadataObj.Spec.Interfaces = &BlueprintInterface{} + } else { + existingInterfaces = proto.Clone(bpMetadataObj.Spec.Interfaces).(*BlueprintInterface) + } + + // create blueprint interfaces i.e. variables & outputs + err = bpMetadataObj.Spec.Interfaces.create(bpPath) + if err != nil { + return nil, fmt.Errorf("error creating blueprint interfaces: %w", err) + } + + // Merge existing connections (if any) into the newly generated interfaces + mergeExistingConnections(bpMetadataObj.Spec.Interfaces, existingInterfaces) + + // Merge existing output types (if any) into the newly generated interfaces + mergeExistingOutputTypes(bpMetadataObj.Spec.Interfaces, existingInterfaces) + + // get blueprint requirements + rolesCfgPath := path.Join(repoDetails.Source.BlueprintRootPath, tfRolesFileName) + svcsCfgPath := path.Join(repoDetails.Source.BlueprintRootPath, tfServicesFileName) + versionsCfgPath := path.Join(bpPath, tfVersionsFileName) + requirements, err := getBlueprintRequirements(rolesCfgPath, svcsCfgPath, versionsCfgPath) + if err != nil { + Log.Info("skipping blueprint requirements since roles and/or services configurations were not found as per https://tinyurl.com/tf-iam and https://tinyurl.com/tf-services") + } else { + bpMetadataObj.Spec.Requirements = requirements + } + + if bpMetadataObj.Spec.Content == nil { + 
bpMetadataObj.Spec.Content = &BlueprintContent{} + } + + // create blueprint content i.e. documentation, icons, etc. + bpMetadataObj.Spec.Content.create(bpPath, repoDetails.Source.BlueprintRootPath, readmeContent) + return bpMetadataObj, nil +} + +func CreateBlueprintDisplayMetadata(bpPath string, bpDisp, bpCore *BlueprintMetadata) (*BlueprintMetadata, error) { + // start creating blueprint metadata + bpDisp.ApiVersion = bpCore.ApiVersion + bpDisp.Kind = bpCore.Kind + + if bpDisp.Metadata == nil { + bpDisp.Metadata = &ResourceTypeMeta{ + Name: bpCore.Metadata.Name + "-display", + Annotations: map[string]string{localConfigAnnotation: "true"}, + } + } + + if bpDisp.Spec == nil { + bpDisp.Spec = &BlueprintMetadataSpec{} + } + + if bpDisp.Spec.Info == nil { + bpDisp.Spec.Info = &BlueprintInfo{} + } + + if bpDisp.Spec.Ui == nil { + bpDisp.Spec.Ui = &BlueprintUI{} + bpDisp.Spec.Ui.Input = &BlueprintUIInput{} + } + + bpDisp.Spec.Info.Title = bpCore.Spec.Info.Title + bpDisp.Spec.Info.Source = bpCore.Spec.Info.Source + buildUIInputFromVariables(bpCore.Spec.Interfaces.Variables, bpDisp.Spec.Ui.Input) + + existingInput := func() *BlueprintUIInput { + if bpCore.Spec.Ui != nil && bpCore.Spec.Ui.Input != nil { + return proto.Clone(bpCore.Spec.Ui.Input).(*BlueprintUIInput) + } + return &BlueprintUIInput{} + }() + // Merge existing data (if any) into the newly generated UI Input + mergeExistingAltDefaults(bpDisp.Spec.Ui.Input, existingInput) + + return bpDisp, nil +} + +func (i *BlueprintInfo) create(bpPath string, r repoDetail, readmeContent []byte) error { + title, err := getMdContent(readmeContent, 1, 1, "", false) + if err != nil { + return fmt.Errorf("title tag missing in markdown, err: %w", err) + } + + i.Title = title.literal + rootPath := r.Source.RepoRootPath + if rootPath == "" { + rootPath = r.Source.BlueprintRootPath + } + + bpDir := strings.ReplaceAll(bpPath, rootPath, "") + i.Source = &BlueprintRepoDetail{ + Repo: r.Source.URL, + SourceType: r.Source.SourceType, + 
Dir: bpDir, + } + + versionInfo, err := getBlueprintVersion(path.Join(bpPath, tfVersionsFileName)) + if err == nil { + i.Version = versionInfo.moduleVersion + i.ActuationTool = &BlueprintActuationTool{ + Version: versionInfo.requiredTfVersion, + Flavor: "Terraform", + } + } + + // create descriptions + i.Description = &BlueprintDescription{} + tagline, err := getMdContent(readmeContent, -1, -1, "Tagline", true) + if err == nil { + i.Description.Tagline = tagline.literal + } + + detailed, err := getMdContent(readmeContent, -1, -1, "Detailed", true) + if err == nil { + i.Description.Detailed = detailed.literal + } + + preDeploy, err := getMdContent(readmeContent, -1, -1, "PreDeploy", true) + if err == nil { + i.Description.PreDeploy = preDeploy.literal + } + + var archListToSet []string + architecture, err := getMdContent(readmeContent, -1, -1, "Architecture", true) + if err == nil { + for _, li := range architecture.listItems { + archListToSet = append(archListToSet, li.text) + } + + i.Description.Architecture = archListToSet + } + + // create icon + iPath := path.Join(r.Source.BlueprintRootPath, iconFilePath) + exists, _ := fileExists(iPath) + if exists { + i.Icon = iconFilePath + } + + d, err := getDeploymentDuration(readmeContent, "Deployment Duration") + if err == nil { + i.DeploymentDuration = d + } + + c, err := getCostEstimate(readmeContent, "Cost") + if err == nil { + i.CostEstimate = c + } + + return nil +} + +func (i *BlueprintInterface) create(bpPath string) error { + interfaces, err := getBlueprintInterfaces(bpPath) + if err != nil { + return err + } + + i.Variables = interfaces.Variables + i.Outputs = interfaces.Outputs + + return nil +} + +func (c *BlueprintContent) create(bpPath string, rootPath string, readmeContent []byte) { + var docListToSet []*BlueprintListContent + documentation, err := getMdContent(readmeContent, -1, -1, "Documentation", true) + if err == nil { + for _, li := range documentation.listItems { + doc := &BlueprintListContent{ + 
Title: li.text, + Url: li.url, + } + + docListToSet = append(docListToSet, doc) + } + + c.Documentation = docListToSet + } + + // create architecture + a, err := getArchitctureInfo(readmeContent, "Architecture") + if err == nil { + c.Architecture = a + } + + // create sub-blueprints + modPath := path.Join(bpPath, modulesPath) + modContent, err := getModules(modPath) + if err == nil { + c.SubBlueprints = modContent + } + + // create examples + exPath := path.Join(rootPath, examplesPath) + exContent, err := getExamples(exPath) + if err == nil { + c.Examples = exContent + } +} + +func WriteMetadata(obj *BlueprintMetadata, bpPath, fileName string) error { + jBytes, err := protojson.Marshal(obj) + if err != nil { + return err + } + + input := strings.NewReader(string(jBytes)) + var output strings.Builder + if err := json2yaml.Convert(&output, input); err != nil { + return err + } + + return os.WriteFile(path.Join(bpPath, fileName), []byte(output.String()), 0644) +} + +func UnmarshalMetadata(bpPath, fileName string) (*BlueprintMetadata, error) { + bpObj := BlueprintMetadata{} + metaFilePath := path.Join(bpPath, fileName) + + // return empty metadata if file does not exist or if the file is not read + if _, err := os.Stat(metaFilePath); errors.Is(err, os.ErrNotExist) { + return &bpObj, err + } + + f, err := os.ReadFile(metaFilePath) + if err != nil { + return &bpObj, fmt.Errorf("unable to read metadata from the existing file: %w", err) + } + + // convert yaml bytes to json bytes for unmarshaling metadata + // content to proto definition + j, err := yaml.YAMLToJSON(f) + if err != nil { + return nil, err + } + + if err := protojson.Unmarshal(j, &bpObj); err != nil { + return &bpObj, err + } + + currVersion := bpObj.ApiVersion + currKind := bpObj.Kind + + //validate GVK for current metadata + if currVersion != metadataApiVersion { + return &bpObj, fmt.Errorf("found incorrect version for the metadata: %s. 
Supported version is: %s", currVersion, metadataApiVersion) + } + + if currKind != metadataKind { + return &bpObj, fmt.Errorf("found incorrect kind for the metadata: %s. Supported kind is %s", currKind, metadataKind) + } + + return &bpObj, nil +} diff --git a/cli/bpmetadata/cmd_test.go b/cli/bpmetadata/cmd_test.go new file mode 100644 index 00000000000..458e5cb791d --- /dev/null +++ b/cli/bpmetadata/cmd_test.go @@ -0,0 +1,152 @@ +package bpmetadata + +import ( + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" +) + +func TestCmdExecution(t *testing.T) { + tests := []struct { + description string + args []string + expectErr bool + }{ + { + description: "execute metadata command with valid inputs", + args: []string{"metadata", "--help"}, + expectErr: false, + }, + { + description: "execute metadata command with invalid inputs", + args: []string{"metadata", "--invalid-flag"}, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + cmd := &cobra.Command{Use: "cft"} + cmd.SetArgs(tt.args) + + _, err := cmd.ExecuteC() + + if tt.expectErr { + assert.Error(t, err, "Command should return an error") + } else { + assert.NoError(t, err, "Command should execute without error") + } + }) + } +} + +func TestCreateBlueprintDisplayMetadata(t *testing.T) { + tests := []struct { + description string + bpPath string + bpDisp *BlueprintMetadata + bpCore *BlueprintMetadata + expectErr bool + }{ + { + description: "create metadata with nil display metadata Spec.UI.Input", + bpPath: "/path/to/blueprint", + bpDisp: &BlueprintMetadata{}, + bpCore: &BlueprintMetadata{ + ApiVersion: "v1", + Kind: "Blueprint", + Metadata: &ResourceTypeMeta{ + Name: "core-blueprint", + Labels: map[string]string{ + "env": "core", + }, + }, + Spec: &BlueprintMetadataSpec{ + Info: &BlueprintInfo{ + Title: "Core Blueprint", + Version: "1.0.0", + Icon: "assets/core_icon.png", + SingleDeployment: false, + }, + Interfaces: 
&BlueprintInterface{ + Variables: []*BlueprintVariable{ + { + Name: "test_var_1", + }, + }, + }, + Ui: &BlueprintUI{ + Input: nil, + }, + }, + }, + expectErr: false, + }, + { + description: "create metadata with valid input", + bpPath: "/path/to/blueprint", + bpDisp: &BlueprintMetadata{ + Spec: &BlueprintMetadataSpec{ + Ui: &BlueprintUI{ + Input: &BlueprintUIInput{ + Variables: map[string]*DisplayVariable{ + "test_var_1": { + Name: "test var 1", + Title: "This is a test input", + }, + }, + }, + }, + }, + }, + bpCore: &BlueprintMetadata{ + ApiVersion: "v1", + Kind: "Blueprint", + Metadata: &ResourceTypeMeta{ + Name: "core-blueprint", + Labels: map[string]string{ + "env": "core", + }, + }, + Spec: &BlueprintMetadataSpec{ + Info: &BlueprintInfo{ + Title: "Core Blueprint", + Version: "1.0.0", + Icon: "assets/core_icon.png", + SingleDeployment: false, + }, + Interfaces: &BlueprintInterface{ + Variables: []*BlueprintVariable{ + { + Name: "test_var_1", + }, + }, + }, + Ui: &BlueprintUI{ + Input: nil, + }, + }, + }, + expectErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + metadata, err := CreateBlueprintDisplayMetadata(tt.bpPath, tt.bpDisp, tt.bpCore) + if tt.expectErr { + assert.Error(t, err, "Function should return an error") + assert.Nil(t, metadata, "Metadata should be nil when there is an error") + } else { + assert.NoError(t, err, "Function should not return an error") + assert.NotNil(t, metadata, "Metadata should not be nil") + if tt.bpDisp != nil { + assert.Equal(t, tt.bpDisp.Metadata.Name, metadata.Metadata.Name, "Metadata name should match the input") + assert.Equal(t, tt.bpDisp.Spec.Info.Title, metadata.Spec.Info.Title, "Metadata title should match the input") + assert.Equal(t, tt.bpDisp.Spec.Info.Version, metadata.Spec.Info.Version, "Metadata version should match the input") + } + } + }) + } +} diff --git a/cli/bpmetadata/display.go b/cli/bpmetadata/display.go new file mode 100644 index 00000000000..fbe31eda8b1 
--- /dev/null +++ b/cli/bpmetadata/display.go @@ -0,0 +1,52 @@ +package bpmetadata + +import ( + "strings" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +func buildUIInputFromVariables(vars []*BlueprintVariable, input *BlueprintUIInput) { + if input.Variables == nil { + input.Variables = make(map[string]*DisplayVariable) + } + + for _, v := range vars { + _, hasDisplayVar := input.Variables[v.Name] + if hasDisplayVar { + continue + } + + input.Variables[v.Name] = &DisplayVariable{ + Name: v.Name, + Title: createTitleFromName(v.Name), + } + } +} + +func createTitleFromName(name string) string { + nameSplit := strings.Split(name, "_") + var titleSplit []string + for _, n := range nameSplit { + titleSplit = append(titleSplit, cases.Title(language.Und, cases.NoLower).String(n)) + } + + return strings.Join(titleSplit, " ") +} + +// mergeExistingAltDefaults merges existing alt_defaults from an old BlueprintUIInput into a new one, +// preserving manually authored alt_defaults. 
+func mergeExistingAltDefaults(newInput, existingInput *BlueprintUIInput) { + if existingInput == nil { + return // Nothing to merge if existingInput is nil + } + + for i, variable := range newInput.Variables { + for _, existingVariable := range existingInput.Variables { + if variable.Name == existingVariable.Name && existingVariable.AltDefaults != nil { + newInput.Variables[i].AltDefaults = existingVariable.AltDefaults + } + } + } +} diff --git a/cli/bpmetadata/display_test.go b/cli/bpmetadata/display_test.go new file mode 100644 index 00000000000..04c35aabe8f --- /dev/null +++ b/cli/bpmetadata/display_test.go @@ -0,0 +1,224 @@ +package bpmetadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/structpb" +) + +func TestUIInputFromVariables(t *testing.T) { + tests := []struct { + name string + coreVars []*BlueprintVariable + UIinput *BlueprintUIInput + }{ + { + name: "display metadata does not exist", + coreVars: []*BlueprintVariable{ + { + Name: "test_var_1", + }, + { + Name: "test_var_2", + }, + { + Name: "test_var_3", + }, + }, + UIinput: &BlueprintUIInput{}, + }, + { + name: "display metadata exists and is in line with core metadata", + coreVars: []*BlueprintVariable{ + { + Name: "test_var_1", + }, + { + Name: "test_var_2", + }, + { + Name: "test_var_3", + }, + }, + UIinput: &BlueprintUIInput{ + Variables: map[string]*DisplayVariable{ + "test_var_1": { + Name: "test_var_1", + }, + "test_var_2": { + Name: "test_var_2", + }, + "test_var_3": { + Name: "test_var_3", + }, + }, + }, + }, + { + name: "display metadata exists and is not in line with core metadata", + coreVars: []*BlueprintVariable{ + { + Name: "test_var_1", + }, + { + Name: "test_var_2", + }, + { + Name: "test_var_4", + }, + }, + UIinput: &BlueprintUIInput{ + Variables: map[string]*DisplayVariable{ + "test_var_1": { + Name: "test_var_1", + }, + "test_var_2": { + Name: "test_var_2", + }, + "test_var_3": { + Name: "test_var_3", + }, + }, + }, + 
}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buildUIInputFromVariables(tt.coreVars, tt.UIinput) + for _, v := range tt.coreVars { + dispVar := tt.UIinput.Variables[v.Name] + assert.NotNil(t, dispVar) + assert.Equal(t, v.Name, dispVar.Name) + } + + assert.GreaterOrEqual(t, len(tt.UIinput.Variables), len(tt.coreVars)) + }) + } +} + +func TestCreateTitleFromName(t *testing.T) { + tests := []struct { + name string + inputName string + wantTitle string + }{ + { + name: "name with underscores", + inputName: "foo_bar_baz", + wantTitle: "Foo Bar Baz", + }, + { + name: "name with underscores w/ numbers", + inputName: "foo_bar_baz_01", + wantTitle: "Foo Bar Baz 01", + }, + { + name: "name w/o underscores", + inputName: "FooBarBaz", + wantTitle: "FooBarBaz", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := createTitleFromName(tt.inputName) + if got != tt.wantTitle { + t.Errorf("createTitleFromName() = %v, want %v", got, tt.wantTitle) + } + }) + } +} + +func TestMergeExistingAltDefaults(t *testing.T) { + tests := []struct { + name string + newInput *BlueprintUIInput + existingInput *BlueprintUIInput + expectInput *BlueprintUIInput + }{ + { + name: "Merge alt default into UI input", + newInput: &BlueprintUIInput{ + Variables: map[string]*DisplayVariable{ + "test_var_1": { + Name: "test_var_1", + }, + }, + }, + existingInput: &BlueprintUIInput{ + Variables: map[string]*DisplayVariable{ + "test_var_1": { + Name: "test_var_1", + AltDefaults: []*DisplayVariable_AlternateDefault{ + { + Type: 0, + Value: structpb.NewStringValue("alt_default_value"), + }, + }, + }, + }, + }, + expectInput: &BlueprintUIInput{ + Variables: map[string]*DisplayVariable{ + "test_var_1": { + Name: "test_var_1", + AltDefaults: []*DisplayVariable_AlternateDefault{ + { + Type: 0, + Value: structpb.NewStringValue("alt_default_value"), + }, + }, + }, + }, + }, + }, + { + name: "No existing input", + newInput: &BlueprintUIInput{ + Variables: 
map[string]*DisplayVariable{ + "test_var_1": { + Name: "test_var_1", + }, + }, + }, + existingInput: nil, + expectInput: &BlueprintUIInput{ + Variables: map[string]*DisplayVariable{ + "test_var_1": { + Name: "test_var_1", + }, + }, + }, + }, + { + name: "Empty new input", + newInput: &BlueprintUIInput{ + Variables: map[string]*DisplayVariable{}, + }, + existingInput: &BlueprintUIInput{ + Variables: map[string]*DisplayVariable{ + "test_var_1": { + Name: "test_var_1", + AltDefaults: []*DisplayVariable_AlternateDefault{ + { + Type: 0, + Value: structpb.NewStringValue("alt_default_value"), + }, + }, + }, + }, + }, + expectInput: &BlueprintUIInput{ + Variables: map[string]*DisplayVariable{}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mergeExistingAltDefaults(tt.newInput, tt.existingInput) + assert.Equal(t, tt.newInput, tt.expectInput) + }) + } +} diff --git a/cli/bpmetadata/doc.go b/cli/bpmetadata/doc.go new file mode 100644 index 00000000000..78eab553da1 --- /dev/null +++ b/cli/bpmetadata/doc.go @@ -0,0 +1,65 @@ +// Package bpmetadata generates and validates metadata for Terraform blueprint packages. +// +// bpmetadata is a tool that generates metadata for Terraform packages that are structured based +// on the +// [CFT Module Template]. Executing the cli at the root of a Terraform blueprint package or pointing +// to one with a "path" flag will generate "metadata.yaml". Additionally a UI-display specific +// metadata file can also be generated by providing the cli with a "display" flag. +// +// # Downloading the CFT CLI +// +// Download the CFT CLI as: +// +// curl https://storage.googleapis.com/cft-cli//cft--amd64 --output cft +// +// Where: +// - VERSION can be set to "latest" or to a specific semver e.g. "v0.5.0". It is recommended to +// use the "latest" version. +// - PLATFORM can be set as "linux", "windows" or "darwin". +// +// e.g. 
the latest version of the CLI for linux can be downloaded as: +// +// curl https://storage.googleapis.com/cft-cli/latest/cft-linux-amd64 --output cft +// +// This will download the CLI binary to the current working directory. +// +// # Generating metadata for a Terraform package +// +// Prerequisite: The Terraform package should conform to the folder structure defined by the +// [CFT Module Template]. A vanilla package can be generated using `cookiecutter` as explained in +// the CFT Module Template's documentation. +// +// Generate metadata using the CLI by pointing the CLI to the root of the Terraform package as: +// +// cft blueprint metadata -p -d +// +// This will generate two files i.e. "metadata.yaml" and "metadata.display.yaml" for each root and +// sub-modules available in the Terraform Blueprint. "metadata.yaml" is mostly auto generated while +// "metadata.display.yaml" is expected to be hand-authored. +// +// All fields (auto generated and manually authored) supported by the metadata schema can be found +// under the top-level struct type [BlueprintMetadata]. +// +// Refer to sample versions of [metadata.yaml] and [metadata.display.yaml] for the [canonical] +// Terraform package. +// +// For all available flags for the CLI, use help for cft as: +// +// cft blueprint metadata -h +// +// # Validating metadata for schema consistencies +// +// Validate metadata for your root and sub modules with the CFT CLI as: +// +// cft blueprint metadata -v +// +// This will output a success message i.e. "metadata is valid" if all fields in all metadata files +// are consistent with the [BlueprintMetadata] schema. Otherwise, error messages for invalid field +// names, types or values will be shown. 
+// +// [BlueprintMetadata]: https://pkg.go.dev/github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata#BlueprintMetadata +// [metadata.yaml]: https://github.com/g-awmalik/terraform-google-canonical-mp/blob/main/metadata.yaml +// [metadata.display.yaml]: https://github.com/g-awmalik/terraform-google-canonical-mp/blob/main/metadata.display.yaml +// [CFT Module Template]: https://github.com/terraform-google-modules/terraform-google-module-template +// [canonical]: https://github.com/g-awmalik/terraform-google-canonical-mp/tree/main +package bpmetadata diff --git a/cli/bpmetadata/int-test/goldens/golden-metadata.display.yaml b/cli/bpmetadata/int-test/goldens/golden-metadata.display.yaml new file mode 100644 index 00000000000..3b391c64aaa --- /dev/null +++ b/cli/bpmetadata/int-test/goldens/golden-metadata.display.yaml @@ -0,0 +1,129 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-cloud-storage-display + annotations: + config.kubernetes.io/local-config: "true" +spec: + info: + title: Terraform Google Cloud Storage Module + source: + repo: https://github.com/terraform-google-modules/terraform-google-cloud-storage.git + sourceType: git + ui: + input: + variables: + admins: + name: admins + title: Admins + bucket_admins: + name: bucket_admins + title: Bucket Admins + bucket_creators: + name: bucket_creators + title: Bucket Creators + bucket_hmac_key_admins: + name: bucket_hmac_key_admins + title: Bucket Hmac Key Admins + bucket_lifecycle_rules: + name: bucket_lifecycle_rules + title: Bucket Lifecycle Rules + bucket_policy_only: + name: bucket_policy_only + title: Bucket Policy Only + bucket_storage_admins: + name: bucket_storage_admins + title: Bucket Storage Admins + bucket_viewers: + name: bucket_viewers + title: Bucket Viewers + cors: + name: cors + title: Cors + creators: + name: creators + title: Creators + custom_placement_config: + name: custom_placement_config + title: Custom Placement 
Config + default_event_based_hold: + name: default_event_based_hold + title: Default Event Based Hold + encryption_key_names: + name: encryption_key_names + title: Encryption Key Names + folders: + name: folders + title: Folders + force_destroy: + name: force_destroy + title: Force Destroy + hmac_key_admins: + name: hmac_key_admins + title: Hmac Key Admins + hmac_service_accounts: + name: hmac_service_accounts + title: Hmac Service Accounts + labels: + name: labels + title: Labels + lifecycle_rules: + name: lifecycle_rules + title: Lifecycle Rules + location: + name: location + title: Location + logging: + name: logging + title: Logging + names: + name: names + title: Names + prefix: + name: prefix + title: Prefix + project_id: + name: project_id + title: Project Id + public_access_prevention: + name: public_access_prevention + title: Public Access Prevention + randomize_suffix: + name: randomize_suffix + title: Randomize Suffix + retention_policy: + name: retention_policy + title: Retention Policy + set_admin_roles: + name: set_admin_roles + title: Set Admin Roles + set_creator_roles: + name: set_creator_roles + title: Set Creator Roles + set_hmac_access: + name: set_hmac_access + title: Set Hmac Access + set_hmac_key_admin_roles: + name: set_hmac_key_admin_roles + title: Set Hmac Key Admin Roles + set_storage_admin_roles: + name: set_storage_admin_roles + title: Set Storage Admin Roles + set_viewer_roles: + name: set_viewer_roles + title: Set Viewer Roles + storage_admins: + name: storage_admins + title: Storage Admins + storage_class: + name: storage_class + title: Storage Class + versioning: + name: versioning + title: Versioning + viewers: + name: viewers + title: Viewers + website: + name: website + title: Website diff --git a/cli/bpmetadata/int-test/goldens/golden-metadata.yaml b/cli/bpmetadata/int-test/goldens/golden-metadata.yaml new file mode 100644 index 00000000000..152aeed668d --- /dev/null +++ b/cli/bpmetadata/int-test/goldens/golden-metadata.yaml @@ 
-0,0 +1,256 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-cloud-storage + annotations: + config.kubernetes.io/local-config: "true" +spec: + info: + title: Terraform Google Cloud Storage Module + source: + repo: https://github.com/terraform-google-modules/terraform-google-cloud-storage.git + sourceType: git + version: 4.0.0 + actuationTool: + flavor: Terraform + version: ">= 0.13" + description: {} + content: + subBlueprints: + - name: simple_bucket + location: modules/simple_bucket + examples: + - name: multiple_buckets + location: examples/multiple_buckets + - name: simple_bucket + location: examples/simple_bucket + interfaces: + variables: + - name: project_id + description: Bucket project id. + varType: string + required: true + - name: prefix + description: Prefix used to generate the bucket name. + varType: string + defaultValue: "" + - name: names + description: Bucket name suffixes. + varType: list(string) + required: true + - name: randomize_suffix + description: Adds an identical, but randomized 4-character suffix to all bucket names + varType: bool + defaultValue: false + - name: location + description: Bucket location. + varType: string + defaultValue: EU + - name: storage_class + description: Bucket storage class. + varType: string + defaultValue: STANDARD + - name: force_destroy + description: Optional map of lowercase unprefixed name => boolean, defaults to false. + varType: map(bool) + defaultValue: {} + - name: versioning + description: Optional map of lowercase unprefixed name => boolean, defaults to false. + varType: map(bool) + defaultValue: {} + - name: encryption_key_names + description: Optional map of lowercase unprefixed name => string, empty strings are ignored. + varType: map(string) + defaultValue: {} + - name: bucket_policy_only + description: Disable ad-hoc ACLs on specified buckets. Defaults to true. 
Map of lowercase unprefixed name => boolean + varType: map(bool) + defaultValue: {} + - name: default_event_based_hold + description: Enable event based hold to new objects added to specific bucket. Defaults to false. Map of lowercase unprefixed name => boolean + varType: map(bool) + defaultValue: {} + - name: admins + description: IAM-style members who will be granted roles/storage.objectAdmin on all buckets. + varType: list(string) + defaultValue: [] + - name: creators + description: IAM-style members who will be granted roles/storage.objectCreators on all buckets. + varType: list(string) + defaultValue: [] + - name: viewers + description: IAM-style members who will be granted roles/storage.objectViewer on all buckets. + varType: list(string) + defaultValue: [] + - name: hmac_key_admins + description: IAM-style members who will be granted roles/storage.hmacKeyAdmin on all buckets. + varType: list(string) + defaultValue: [] + - name: storage_admins + description: IAM-style members who will be granted roles/storage.admin on all buckets. + varType: list(string) + defaultValue: [] + - name: bucket_admins + description: Map of lowercase unprefixed name => comma-delimited IAM-style per-bucket admins. + varType: map(string) + defaultValue: {} + - name: bucket_creators + description: Map of lowercase unprefixed name => comma-delimited IAM-style per-bucket creators. + varType: map(string) + defaultValue: {} + - name: bucket_viewers + description: Map of lowercase unprefixed name => comma-delimited IAM-style per-bucket viewers. + varType: map(string) + defaultValue: {} + - name: bucket_hmac_key_admins + description: Map of lowercase unprefixed name => comma-delimited IAM-style per-bucket HMAC Key admins. + varType: map(string) + defaultValue: {} + - name: bucket_storage_admins + description: Map of lowercase unprefixed name => comma-delimited IAM-style per-bucket storage admins. 
+ varType: map(string) + defaultValue: {} + - name: labels + description: Labels to be attached to the buckets + varType: map(string) + defaultValue: {} + - name: folders + description: Map of lowercase unprefixed name => list of top level folder objects. + varType: map(list(string)) + defaultValue: {} + - name: set_admin_roles + description: Grant roles/storage.objectAdmin role to admins and bucket_admins. + varType: bool + defaultValue: false + - name: set_creator_roles + description: Grant roles/storage.objectCreator role to creators and bucket_creators. + varType: bool + defaultValue: false + - name: set_viewer_roles + description: Grant roles/storage.objectViewer role to viewers and bucket_viewers. + varType: bool + defaultValue: false + - name: set_hmac_key_admin_roles + description: Grant roles/storage.hmacKeyAdmin role to hmac_key_admins and bucket_hmac_key_admins. + varType: bool + defaultValue: false + - name: set_storage_admin_roles + description: Grant roles/storage.admin role to storage_admins and bucket_storage_admins. + varType: bool + defaultValue: false + - name: lifecycle_rules + description: List of lifecycle rules to configure. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket.html#lifecycle_rule except condition.matches_storage_class should be a comma delimited string. + varType: |- + set(object({ + # Object with keys: + # - type - The type of the action of this Lifecycle Rule. Supported values: Delete and SetStorageClass. + # - storage_class - (Required if action type is SetStorageClass) The target Storage Class of objects affected by this Lifecycle Rule. + action = map(string) + + # Object with keys: + # - age - (Optional) Minimum age of an object in days to satisfy this condition. + # - created_before - (Optional) Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition. + # - with_state - (Optional) Match to live and/or archived objects. 
Supported values include: "LIVE", "ARCHIVED", "ANY". + # - matches_storage_class - (Optional) Comma delimited string for storage class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, DURABLE_REDUCED_AVAILABILITY. + # - matches_prefix - (Optional) One or more matching name prefixes to satisfy this condition. + # - matches_suffix - (Optional) One or more matching name suffixes to satisfy this condition. + # - num_newer_versions - (Optional) Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition. + # - custom_time_before - (Optional) A date in the RFC 3339 format YYYY-MM-DD. This condition is satisfied when the customTime metadata for the object is set to an earlier date than the date used in this lifecycle condition. + # - days_since_custom_time - (Optional) The number of days from the Custom-Time metadata attribute after which this condition becomes true. + # - days_since_noncurrent_time - (Optional) Relevant only for versioned objects. Number of days elapsed since the noncurrent timestamp of an object. + # - noncurrent_time_before - (Optional) Relevant only for versioned objects. The date in RFC 3339 (e.g. 2017-06-13) when the object became nonconcurrent. + condition = map(string) + })) + defaultValue: [] + - name: bucket_lifecycle_rules + description: Additional lifecycle_rules for specific buckets. Map of lowercase unprefixed name => list of lifecycle rules to configure. + varType: |- + map(set(object({ + # Object with keys: + # - type - The type of the action of this Lifecycle Rule. Supported values: Delete and SetStorageClass. + # - storage_class - (Required if action type is SetStorageClass) The target Storage Class of objects affected by this Lifecycle Rule. + action = map(string) + + # Object with keys: + # - age - (Optional) Minimum age of an object in days to satisfy this condition. 
+ # - created_before - (Optional) Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition. + # - with_state - (Optional) Match to live and/or archived objects. Supported values include: "LIVE", "ARCHIVED", "ANY". + # - matches_storage_class - (Optional) Comma delimited string for storage class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, DURABLE_REDUCED_AVAILABILITY. + # - num_newer_versions - (Optional) Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition. + # - custom_time_before - (Optional) A date in the RFC 3339 format YYYY-MM-DD. This condition is satisfied when the customTime metadata for the object is set to an earlier date than the date used in this lifecycle condition. + # - days_since_custom_time - (Optional) The number of days from the Custom-Time metadata attribute after which this condition becomes true. + # - days_since_noncurrent_time - (Optional) Relevant only for versioned objects. Number of days elapsed since the noncurrent timestamp of an object. + # - noncurrent_time_before - (Optional) Relevant only for versioned objects. The date in RFC 3339 (e.g. 2017-06-13) when the object became nonconcurrent. + condition = map(string) + }))) + defaultValue: {} + - name: cors + description: "Set of maps of mixed type attributes for CORS values. See appropriate attribute types here: https://www.terraform.io/docs/providers/google/r/storage_bucket.html#cors" + varType: set(any) + defaultValue: [] + - name: website + description: "Map of website values. Supported attributes: main_page_suffix, not_found_page" + varType: map(any) + defaultValue: {} + - name: retention_policy + description: Map of retention policy values. 
Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket#retention_policy + varType: any + defaultValue: {} + - name: custom_placement_config + description: Map of lowercase unprefixed name => custom placement config object. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket#custom_placement_config + varType: any + defaultValue: {} + - name: logging + description: Map of lowercase unprefixed name => bucket logging config object. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket.html#logging + varType: any + defaultValue: {} + - name: set_hmac_access + description: Set S3 compatible access to GCS. + varType: bool + defaultValue: false + - name: hmac_service_accounts + description: List of HMAC service accounts to grant access to GCS. + varType: map(string) + defaultValue: {} + - name: public_access_prevention + description: Prevents public access to a bucket. Acceptable values are inherited or enforced. If inherited, the bucket uses public access prevention, only if the bucket is subject to the public access prevention organization policy constraint. + varType: string + defaultValue: inherited + outputs: + - name: bucket + description: Bucket resource (for single use). + - name: buckets + description: Bucket resources as list. + - name: buckets_map + description: Bucket resources by name. + - name: hmac_keys + description: List of HMAC keys. + - name: name + description: Bucket name (for single use). + - name: names + description: Bucket names. + - name: names_list + description: List of bucket names. + - name: url + description: Bucket URL (for single use). + - name: urls + description: Bucket URLs. + - name: urls_list + description: List of bucket URLs. 
+ requirements: + roles: + - level: Project + roles: + - roles/storage.admin + - roles/iam.serviceAccountUser + services: + - iam.googleapis.com + - storage-api.googleapis.com + - cloudresourcemanager.googleapis.com + - compute.googleapis.com + - serviceusage.googleapis.com + providerVersions: + - source: hashicorp/google + version: ">= 4.42, < 5.0" + - source: hashicorp/random + version: ">= 2.1" diff --git a/cli/bpmetadata/int-test/workflow.sh b/cli/bpmetadata/int-test/workflow.sh new file mode 100755 index 00000000000..8365732901e --- /dev/null +++ b/cli/bpmetadata/int-test/workflow.sh @@ -0,0 +1,86 @@ +#! /bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This intergration test does the following: +# 1. Pulls downs a blueprint package with a specific version tag for metadata generation & validation +# 2. Generates metadata using the `metadata` subcommand on the blueprint version pulled from Github +# 3. Runs metadata validation on golden blueprints stored under goldens/. This is to ensure that the +# latest schema hasn't caused any regressions to the existing/live metadata format. This is most +# benenificial for validating hand-authored fields. +# 4. Runs `git diff` to compare the golden metadata to the generated metadata to expose any changes in +# schema for the autogenerated fields. 
+ +CURRENT_DIR=$1 +WORKING_FOLDER=".working" +BLUPRINT_FOLDER=".blueprint" +GIT_FOLDER="git" +GOLDENS_FOLDER="goldens" +GOLDEN_METADATA="golden-metadata.yaml" +GOLDEN_DISPLAY_METADATA="golden-metadata.display.yaml" +WORKING_METADATA="metadata.yaml" +WORKING_DISPLAY_METADATA="metadata.display.yaml" + +if [[ -n $CURRENT_DIR ]]; then + WORKING_FOLDER="$CURRENT_DIR/.working" +fi + +# Create a temporary working folder to create assets for +# the integration test. If the temp folder already exists, remove it. +if [ -d $WORKING_FOLDER ] +then + rm -r -f $WORKING_FOLDER +fi + +mkdir $WORKING_FOLDER && cd $WORKING_FOLDER + +# Get the blueprint package for v4.0.0 specifically because the golden metadata +# to be validated is for that version. +git config --global advice.detachedHead false +git clone -b v4.0.0 --single-branch https://github.com/terraform-google-modules/terraform-google-cloud-storage.git "./$BLUPRINT_FOLDER/" +../../../bin/cft blueprint metadata -p $BLUPRINT_FOLDER -d -q -f + +mkdir $GIT_FOLDER +cp "../$GOLDENS_FOLDER/$GOLDEN_METADATA" "$GIT_FOLDER/$WORKING_METADATA" +cp "../$GOLDENS_FOLDER/$GOLDEN_DISPLAY_METADATA" "$GIT_FOLDER/$WORKING_DISPLAY_METADATA" + +cd "$GIT_FOLDER" +# Confirm if the goldens are still valid with the blueprint schema +../../../../bin/cft blueprint metadata -v +rval=$? + +if [ $rval -ne 0 ]; then + echo "Error! Unable to validate the golden metadata(s)." + exit $rval +fi + +# Compare golden metadata to the generated metadata. +git init +git add . +cp "../$BLUPRINT_FOLDER/$WORKING_METADATA" "$WORKING_METADATA" +cp "../$BLUPRINT_FOLDER/$WORKING_DISPLAY_METADATA" "$WORKING_DISPLAY_METADATA" +git diff --exit-code --quiet +rval=$? + +if [ $rval -eq 1 ]; then + echo "Error! Generated metadata(s) do not match the golden(s)." + git diff > diff.txt + cat diff.txt + exit $rval +elif [ $rval -gt 1 ]; then + echo "Error occurred while comparaing metadata(s) to golden." + exit $rval +fi + +echo "Success: generated metadata(s) match the golden(s)." 
diff --git a/cli/bpmetadata/main.go b/cli/bpmetadata/main.go new file mode 100644 index 00000000000..59f70ada1d9 --- /dev/null +++ b/cli/bpmetadata/main.go @@ -0,0 +1,8 @@ +package bpmetadata + +import ( + log "github.com/inconshreveable/log15" +) + +// bpmetadata log15 handler +var Log = log.New() diff --git a/cli/bpmetadata/markdown.go b/cli/bpmetadata/markdown.go new file mode 100644 index 00000000000..55603d02144 --- /dev/null +++ b/cli/bpmetadata/markdown.go @@ -0,0 +1,220 @@ +package bpmetadata + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/gomarkdown/markdown" + "github.com/gomarkdown/markdown/ast" +) + +type mdContent struct { + literal string + url string + listItems []mdListItem +} + +type mdListItem struct { + text string + url string +} + +var reTimeEstimate = regexp.MustCompile(`(Configuration|Deployment):\s([0-9]+)\smins`) + +// getMdContent accepts 3 types of content requests and return and mdContent object +// with the relevant content info. The 3 scenarios are: +// 1: get heading literal by (level and/or order) OR by title +// 2: get paragraph content immediately following a heading by (level and/or order) OR by title +// 3: get list item content immediately following a heading by (level and/or order) OR by title +// A -1 value to headLevel/headOrder enforces the content to be matchd by headTitle +func getMdContent(content []byte, headLevel int, headOrder int, headTitle string, getContent bool) (*mdContent, error) { + mdDocument := markdown.Parse(content, nil) + orderCtr := 0 + mdSections := mdDocument.GetChildren() + var foundHead bool + for _, section := range mdSections { + // if the first child is nil, it's a comment and we don't + // need to evaluate it + if ast.GetFirstChild(section) == nil { + continue + } + + currLeaf := ast.GetFirstChild(section).AsLeaf() + switch sectionType := section.(type) { + case *ast.Heading: + foundHead = false + if headTitle == string(currLeaf.Literal) { + foundHead = true + } + + if 
headLevel == sectionType.Level {
+      orderCtr++
+    }
+
+    if !getContent && (headOrder == orderCtr || foundHead) {
+      return &mdContent{
+        literal: string(currLeaf.Literal),
+      }, nil
+    }
+
+  case *ast.Paragraph:
+    if getContent && (headOrder == orderCtr || foundHead) {
+      // check if the content is a link
+      l := ast.GetLastChild(currLeaf.Parent)
+      lNode, isLink := l.(*ast.Link)
+      if isLink {
+        return &mdContent{
+          literal: string(ast.GetFirstChild(lNode).AsLeaf().Literal),
+          url:     string(lNode.Destination),
+        }, nil
+      }
+
+      return &mdContent{
+        literal: string(currLeaf.Literal),
+      }, nil
+    }
+
+  case *ast.List:
+    if getContent && (headOrder == orderCtr || foundHead) {
+      var mdListItems []mdListItem
+      for _, c := range sectionType.Container.Children {
+        var listItem mdListItem
+        // each item is a list with data and metadata about the list item
+        itemConfigs := ast.GetFirstChild(c).AsContainer().Children
+        // if the length of the child node is 1, it is a plain text list item
+        // if the length is greater than 1, it is a list item with a link
+        if len(itemConfigs) == 1 {
+          listItemText := string(itemConfigs[0].AsLeaf().Literal)
+          listItem = mdListItem{
+            text: listItemText,
+          }
+        } else if len(itemConfigs) > 1 {
+          // the second child node has the link data and metadata
+          listItemLink := itemConfigs[1].(*ast.Link)
+          listItemText := string(ast.GetFirstChild(listItemLink).AsLeaf().Literal)
+
+          listItem = mdListItem{
+            text: listItemText,
+            url:  string(listItemLink.Destination),
+          }
+        }
+
+        mdListItems = append(mdListItems, listItem)
+      }
+
+      return &mdContent{
+        listItems: mdListItems,
+      }, nil
+    }
+  }
+  }
+
+  return nil, fmt.Errorf("unable to find md content")
+}
+
+// getDeploymentDuration creates the deployment and configuration time
+// estimates for the blueprint from README.md
+func getDeploymentDuration(content []byte, headTitle string) (*BlueprintTimeEstimate, error) {
+  durationDetails, err := getMdContent(content, -1, -1, headTitle, true)
+  if err != nil {
+    return nil, err
+  }
+
+  // per the comparisons below, m[1] carries the estimate label
+  // (Configuration/Deployment) and m[2] the number of minutes;
+  // reTimeEstimate is declared elsewhere in this file — TODO confirm groups
+  matches := reTimeEstimate.FindAllStringSubmatch(durationDetails.literal, -1)
+  if len(matches) == 0 {
+    return nil, fmt.Errorf("unable to find deployment duration")
+  }
+
+  var timeEstimate BlueprintTimeEstimate
+  for _, m := range matches {
+    // each m[2] will have the time in mins
+    i, err := strconv.ParseInt(m[2], 10, 64)
+    if err != nil {
+      continue
+    }
+
+    // convert the minute estimate to seconds for each known label
+    if m[1] == "Configuration" {
+      timeEstimate.ConfigurationSecs = i * 60
+      continue
+    }
+
+    if m[1] == "Deployment" {
+      timeEstimate.DeploymentSecs = i * 60
+      continue
+    }
+  }
+
+  return &timeEstimate, nil
+}
+
+// getCostEstimate creates the cost estimates from the cost calculator
+// links provided in README.md
+func getCostEstimate(content []byte, headTitle string) (*BlueprintCostEstimate, error) {
+  costDetails, err := getMdContent(content, -1, -1, headTitle, true)
+  if err != nil {
+    return nil, err
+  }
+
+  // costDetails.url is only populated when getMdContent parsed the
+  // section content as a markdown link (see the Paragraph case above)
+  return &BlueprintCostEstimate{
+    Description: costDetails.literal,
+    Url:         costDetails.url,
+  }, nil
+}
+
+// getArchitctureInfo parses and builds Architecture details from README.md
+func getArchitctureInfo(content []byte, headTitle string) (*BlueprintArchitecture, error) {
+  mdDocument := markdown.Parse(content, nil)
+  if mdDocument == nil {
+    return nil, fmt.Errorf("unable to parse md content")
+  }
+
+  children := mdDocument.GetChildren()
+  for _, node := range children {
+    h, isHeading := node.(*ast.Heading)
+    if !isHeading {
+      continue
+    }
+
+    // check if this is the architecture heading
+    hLiteral := string(ast.GetFirstChild(h).AsLeaf().Literal)
+    if hLiteral != headTitle {
+      continue
+    }
+
+    // get architecture details
+    infoNode := ast.GetNextNode(h)
+    paraNode, isPara := infoNode.(*ast.Paragraph)
+    if !isPara {
+      continue
+    }
+
+    t := ast.GetLastChild(paraNode)
+    _, isText := t.(*ast.Text)
+    if !isText {
+      continue
+    }
+
+    // split the text following the diagram into one description entry per line
+    d := strings.TrimLeft(string(t.AsLeaf().Literal), "\n")
+    dList := strings.Split(d, "\n")
+    // the node preceding the text is expected to be the diagram image or link
+    i := ast.GetPrevNode(t)
+    iNode, isImage := i.(*ast.Image)
+    if isImage
{ + return &BlueprintArchitecture{ + Description: dList, + DiagramUrl: string(iNode.Destination), + }, nil + } + + lNode, isLink := i.(*ast.Link) + if isLink { + return &BlueprintArchitecture{ + Description: dList, + DiagramUrl: string(lNode.Destination), + }, nil + } + } + + return nil, fmt.Errorf("unable to find architecture content") +} diff --git a/cli/bpmetadata/markdown_test.go b/cli/bpmetadata/markdown_test.go new file mode 100644 index 00000000000..b76426449f9 --- /dev/null +++ b/cli/bpmetadata/markdown_test.go @@ -0,0 +1,327 @@ +package bpmetadata + +import ( + "os" + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + mdTestdataPath = "../testdata/bpmetadata/md" +) + +func TestProcessMarkdownContent(t *testing.T) { + tests := []struct { + name string + fileName string + level int + order int + title string + getContent bool + want *mdContent + }{ + { + name: "level 1 heading", + fileName: "simple-content.md", + level: 1, + order: 1, + getContent: false, + want: &mdContent{ + literal: "h1 doc title", + }, + }, + { + name: "level 1 heading order 2", + fileName: "simple-content.md", + level: 1, + order: 2, + getContent: false, + want: nil, + }, + { + name: "level 2 heading order 2", + fileName: "simple-content.md", + level: 2, + order: 2, + getContent: false, + want: &mdContent{ + literal: "Horizontal Rules", + }, + }, + { + name: "level 1 content", + fileName: "simple-content.md", + level: 1, + order: 1, + getContent: true, + want: &mdContent{ + literal: "some content doc title for h1", + }, + }, + { + name: "level 3 content order 2", + fileName: "simple-content.md", + level: 3, + order: 2, + getContent: true, + want: &mdContent{ + literal: "some more content sub heading for h3", + }, + }, + { + name: "content by head title", + fileName: "simple-content.md", + level: -1, + order: -1, + title: "h3 sub sub heading", + getContent: true, + want: &mdContent{ + literal: "some content sub heading for 
h3", + }, + }, + { + name: "Tagline does not exist", + fileName: "simple-content.md", + level: -1, + order: -1, + title: "Tagline", + getContent: true, + want: nil, + }, + { + name: "Architecture description exists as diagram content", + fileName: "list-content.md", + level: -1, + order: -1, + title: "Architecture", + getContent: true, + want: &mdContent{ + listItems: []mdListItem{ + { + text: "User requests are sent to the front end, which is deployed on two Cloud Run services as containers to support high scalability applications.", + }, + { + text: "The request then lands on the middle tier, which is the API layer that provides access to the backend. This is also deployed on Cloud Run for scalability and ease of deployment in multiple languages. This middleware is a Golang based API.", + }, + }, + }, + }, + { + name: "content by head title does not exist", + fileName: "simple-content.md", + level: -1, + order: -1, + title: "Horizontal Rules", + getContent: true, + want: nil, + }, + { + name: "content by head title link list items", + fileName: "list-content.md", + level: -1, + order: -1, + title: "Documentation", + getContent: true, + want: &mdContent{ + listItems: []mdListItem{ + { + text: "document-01", + url: "http://google.com/doc-01", + }, + { + text: "document-02", + url: "http://google.com/doc-02", + }, + { + text: "document-03", + url: "http://google.com/doc-03", + }, + { + text: "document-04", + url: "http://google.com/doc-04", + }, + }, + }, + }, + { + name: "content by head title list items", + fileName: "list-content.md", + level: -1, + order: -1, + title: "Diagrams", + getContent: true, + want: &mdContent{ + listItems: []mdListItem{ + { + text: "text-document-01", + }, + { + text: "text-document-02", + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + content, err := os.ReadFile(path.Join(mdTestdataPath, tt.fileName)) + require.NoError(t, err) + got, _ := getMdContent(content, tt.level, tt.order, tt.title, 
tt.getContent) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestProcessArchitectureContent(t *testing.T) { + tests := []struct { + name string + fileName string + title string + want *BlueprintArchitecture + wantErr bool + wantFileErr bool + }{ + { + name: "Architecture details exists as BlueprintArchitecture", + fileName: "list-content.md", + title: "Architecture", + want: &BlueprintArchitecture{ + Description: []string{ + `1. Step 1`, + `2. Step 2`, + `3. Step 3`, + }, + DiagramUrl: "https://i.redd.it/w3kr4m2fi3111.png", + }, + }, + { + name: "Architecture details don't exist as BlueprintArchitecture", + fileName: "list-content.md", + title: "ArchitectureNotValid", + wantErr: true, + }, + { + name: "md content file path for BlueprintArchitecture is invalid", + fileName: "list-content-bad-file-name.md", + title: "Architecture", + wantErr: true, + wantFileErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + content, err := os.ReadFile(path.Join(mdTestdataPath, tt.fileName)) + if (err != nil) != tt.wantFileErr { + t.Errorf("ReadFile() = %v, wantErr %v", err, tt.wantFileErr) + return + } + + got, err := getArchitctureInfo(content, tt.title) + if (err != nil) != tt.wantErr { + t.Errorf("getArchitctureInfo() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func TestProcessDeploymentDurationContent(t *testing.T) { + tests := []struct { + name string + fileName string + title string + want *BlueprintTimeEstimate + wantErr bool + }{ + { + name: "Deployment duration details exists as BlueprintTimeEstimate", + fileName: "simple-content.md", + title: "Deployment Duration", + want: &BlueprintTimeEstimate{ + ConfigurationSecs: 120, + DeploymentSecs: 600, + }, + }, + { + name: "Deployment duration details don't exist as BlueprintTimeEstimate", + fileName: "simple-content.md", + title: "Deployment Duration Invalid", + wantErr: true, + }, + { + name: "Deployment duration exists but 
only for configuration", + fileName: "simple-content.md", + title: "Deployment Duration Only Config", + want: &BlueprintTimeEstimate{ + ConfigurationSecs: 120, + }, + }, + { + name: "md content file path for BlueprintTimeEstimate is invalid", + fileName: "simple-content-bad-file-name.md", + title: "Does not matter", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + content, _ := os.ReadFile(path.Join(mdTestdataPath, tt.fileName)) + got, err := getDeploymentDuration(content, tt.title) + if (err != nil) != tt.wantErr { + t.Errorf("getDeploymentDuration() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func TestProcessCostEstimateContent(t *testing.T) { + tests := []struct { + name string + fileName string + title string + want *BlueprintCostEstimate + wantErr bool + }{ + { + name: "Cost estimate details exists as BlueprintCostEstimate", + fileName: "simple-content.md", + title: "Cost", + want: &BlueprintCostEstimate{ + Description: "Solution cost details", + Url: "https://cloud.google.com/products/calculator?id=02fb0c45-cc29-4567-8cc6-f72ac9024add", + }, + }, + { + name: "Cost estimate details don't exist as BlueprintCostEstimate", + fileName: "simple-content.md", + title: "Cost Invalid", + wantErr: true, + }, + { + name: "md content file path for BlueprintCostEstimate is invalid", + fileName: "simple-content-bad-file-name.md", + title: "Does not matter", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + content, _ := os.ReadFile(path.Join(mdTestdataPath, tt.fileName)) + got, err := getCostEstimate(content, tt.title) + if (err != nil) != tt.wantErr { + t.Errorf("getCostEstimate() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/cli/bpmetadata/parser/state_parser.go b/cli/bpmetadata/parser/state_parser.go new file mode 100644 index 00000000000..42d4ad76565 --- 
/dev/null
+++ b/cli/bpmetadata/parser/state_parser.go
@@ -0,0 +1,78 @@
+package parser
+
+import (
+  "bytes"
+  "encoding/json"
+  "fmt"
+
+  "google.golang.org/protobuf/types/known/structpb"
+
+  tfjson "github.com/hashicorp/terraform-json"
+  "github.com/zclconf/go-cty/cty"
+)
+
+// ParseOutputTypesFromState parses a Terraform JSON state file and
+// returns a map of output name to the output's declared type, encoded
+// as a structpb.Value.
+func ParseOutputTypesFromState(stateData []byte) (map[string]*structpb.Value, error) {
+  var state tfjson.State
+
+  // Unmarshal the state data into tfjson.State
+  err := json.Unmarshal(stateData, &state)
+  if err != nil {
+    return nil, fmt.Errorf("failed to unmarshal state data: %w", err)
+  }
+
+  outputTypeMap := make(map[string]*structpb.Value)
+
+  // state.Values is a pointer and is nil when the state has no
+  // top-level "values" object; without this guard the range below
+  // would panic with a nil pointer dereference.
+  if state.Values == nil {
+    return outputTypeMap, nil
+  }
+
+  for name, output := range state.Values.Outputs {
+    pbValue, err := convertOutputTypeToStructpb(output)
+    if err != nil {
+      return nil, fmt.Errorf("failed to convert output %q to structpb.Value: %w", name, err)
+    }
+    outputTypeMap[name] = pbValue
+  }
+
+  return outputTypeMap, nil
+}
+
+// convertOutputTypeToStructpb encodes the type of a single state output
+// as a structpb.Value. Outputs with no value or no recorded type map to
+// a JSON null.
+func convertOutputTypeToStructpb(output *tfjson.StateOutput) (*structpb.Value, error) {
+  // Handle nil values explicitly
+  if output.Value == nil {
+    return structpb.NewNullValue(), nil
+  }
+
+  // Handle cases where output.Type is NilType
+  if output.Type == cty.NilType {
+    return structpb.NewNullValue(), nil
+  }
+
+  // Marshal the output type to JSON; SetEscapeHTML(false) keeps
+  // characters such as '<' and '>' intact in the encoded type.
+  var buf bytes.Buffer
+  enc := json.NewEncoder(&buf)
+  enc.SetEscapeHTML(false)
+  err := enc.Encode(output.Type)
+  if err != nil {
+    return nil, fmt.Errorf("failed to marshal output type to JSON: %w", err)
+  }
+
+  // Unmarshal the JSON into a structpb.Value
+  pbValue := &structpb.Value{}
+  err = pbValue.UnmarshalJSON(buf.Bytes())
+  if err != nil {
+    return nil, fmt.Errorf("failed to unmarshal JSON into structpb.Value: %w", err)
+  }
+
+  return pbValue, nil
+}
diff --git a/cli/bpmetadata/parser/state_parser_test.go b/cli/bpmetadata/parser/state_parser_test.go
new file mode 100644
index 00000000000..a97417509bc
--- /dev/null
+++ b/cli/bpmetadata/parser/state_parser_test.go
@@ -0,0 +1,177 @@
+package parser
+
+import (
+ "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/types/known/structpb" +) + +func TestParseOutputTypesFromState_WithSimpleTypes(t *testing.T) { + t.Parallel() + stateData := []byte(` +{ + "format_version": "1.0", + "terraform_version": "1.2.0", + "values": { + "outputs": { + "boolean_output": { + "type": "bool", + "value": true + }, + "number_output": { + "type": "number", + "value": 42 + }, + "string_output": { + "type": "string", + "value": "foo" + } + } + } +} +`) + want := map[string]*structpb.Value{ + "boolean_output": structpb.NewStringValue("bool"), + "number_output": structpb.NewStringValue("number"), + "string_output": structpb.NewStringValue("string"), + } + got, err := ParseOutputTypesFromState(stateData) + if err != nil { + t.Errorf("ParseOutputTypesFromState() error = %v", err) + return + } + if diff := cmp.Diff(got, want, cmp.Comparer(compareStructpbValues)); diff != "" { + t.Errorf("ParseOutputTypesFromState() mismatch (-got +want):\n%s", diff) + } +} + +func TestParseOutputTypesFromState_WithComplexTypes(t *testing.T) { + t.Parallel() + stateData := []byte(` +{ + "format_version": "1.0", + "terraform_version": "1.2.0", + "values": { + "outputs": { + "interpolated_deep": { + "type": [ + "object", + { + "foo": "string", + "map": [ + "object", + { + "bar": "string", + "id": "string" + } + ], + "number": "number" + } + ], + "value": { + "foo": "bar", + "map": { + "bar": "baz", + "id": "424881806176056736" + }, + "number": 42 + } + }, + "list_output": { + "type": [ + "tuple", + [ + "string", + "string" + ] + ], + "value": [ + "foo", + "bar" + ] + }, + "map_output": { + "type": [ + "object", + { + "foo": "string", + "number": "number" + } + ], + "value": { + "foo": "bar", + "number": 42 + } + } + } + } +} +`) + want := map[string]*structpb.Value{ + "interpolated_deep": structpb.NewListValue(&structpb.ListValue{Values: []*structpb.Value{ + structpb.NewStringValue("object"), + structpb.NewStructValue(&structpb.Struct{Fields: 
map[string]*structpb.Value{ + "foo": structpb.NewStringValue("string"), + "map": structpb.NewListValue(&structpb.ListValue{Values: []*structpb.Value{structpb.NewStringValue("object"), structpb.NewStructValue(&structpb.Struct{Fields: map[string]*structpb.Value{"bar": structpb.NewStringValue("string"), "id": structpb.NewStringValue("string")}})}}), + "number": structpb.NewStringValue("number"), + }}), + }}), + "list_output": structpb.NewListValue(&structpb.ListValue{Values: []*structpb.Value{ + structpb.NewStringValue("tuple"), + structpb.NewListValue(&structpb.ListValue{Values: []*structpb.Value{structpb.NewStringValue("string"), structpb.NewStringValue("string")}}), + }}), + "map_output": structpb.NewListValue(&structpb.ListValue{Values: []*structpb.Value{ + structpb.NewStringValue("object"), + structpb.NewStructValue(&structpb.Struct{Fields: map[string]*structpb.Value{ + "foo": structpb.NewStringValue("string"), + "number": structpb.NewStringValue("number"), + }}), + }}), + } + got, err := ParseOutputTypesFromState(stateData) + if err != nil { + t.Errorf("ParseOutputTypesFromState() error = %v", err) + return + } + if diff := cmp.Diff(got, want, cmp.Comparer(compareStructpbValues)); diff != "" { + t.Errorf("ParseOutputTypesFromState() mismatch (-got +want):\n%s", diff) + } +} + +func TestParseOutputTypesFromState_WithoutTypes(t *testing.T) { + t.Parallel() + stateData := []byte(` +{ + "format_version": "1.0", + "terraform_version": "1.2.0", + "values": { + "outputs": { + "no_type_output": { + "value": "some_value" + } + } + } +} +`) + want := map[string]*structpb.Value{ + "no_type_output": structpb.NewNullValue(), // Expecting null value when type is missing + } + + got, err := ParseOutputTypesFromState(stateData) + if err != nil { + t.Errorf("ParseOutputTypesFromState() error = %v", err) + return + } + if diff := cmp.Diff(got, want, cmp.Comparer(compareStructpbValues)); diff != "" { + t.Errorf("ParseOutputTypesFromState() mismatch (-got +want):\n%s", diff) + } +} 
+ +// compareStructpbValues is a custom comparer for structpb.Value +func compareStructpbValues(x, y *structpb.Value) bool { + // Marshal to JSON and compare the JSON strings + xJSON, _ := x.MarshalJSON() + yJSON, _ := y.MarshalJSON() + return string(xJSON) == string(yJSON) +} diff --git a/cli/bpmetadata/path.go b/cli/bpmetadata/path.go new file mode 100644 index 00000000000..4556f0a8611 --- /dev/null +++ b/cli/bpmetadata/path.go @@ -0,0 +1,92 @@ +package bpmetadata + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "regexp" + "sort" + "strings" +) + +const ( + regexExamples = ".*/(examples/.*)" + regexModules = ".*/(modules/.*)" +) + +var ( + reExamples = regexp.MustCompile(regexExamples) + reModules = regexp.MustCompile(regexModules) +) + +func fileExists(path string) (bool, error) { + info, err := os.Stat(path) + if err != nil { + return false, fmt.Errorf("unable to read file at the provided path: %w", err) + } + + if info.IsDir() { + return false, fmt.Errorf("provided path is a directory, need a valid file path.") + } + + return true, nil +} + +func getExamples(configPath string) ([]*BlueprintMiscContent, error) { + return getDirPaths(configPath, reExamples) +} + +func getModules(configPath string) ([]*BlueprintMiscContent, error) { + return getDirPaths(configPath, reModules) +} + +// getDirPaths traverses a given path and looks for directories +// with TF configs while ignoring the .terraform* directories created and +// used internally by the Terraform CLI +func getDirPaths(configPath string, re *regexp.Regexp) ([]*BlueprintMiscContent, error) { + paths := []*BlueprintMiscContent{} + err := filepath.Walk(configPath, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("error accessing examples in the path %q: %w", configPath, err) + } + + // skip if this is a .terraform dir + if info.IsDir() && strings.HasPrefix(info.Name(), ".terraform") { + return filepath.SkipDir + } + + // only interested if it has a TF config 
+ if !info.IsDir() && strings.HasSuffix(info.Name(), ".tf") { + d := filepath.Dir(path) + if l := trimPath(d, re); l != "" { + dirPath := &BlueprintMiscContent{ + Name: filepath.Base(d), + Location: l, + } + + paths = append(paths, dirPath) + } + return filepath.SkipDir + } + + return nil + }) + + if err != nil { + return nil, fmt.Errorf("error accessing examples in the path %q: %w", configPath, err) + } + + // Sort by configPath name before returning + sort.SliceStable(paths, func(i, j int) bool { return paths[i].Name < paths[j].Name }) + return paths, nil +} + +func trimPath(assetPath string, re *regexp.Regexp) string { + matches := re.FindStringSubmatch(assetPath) + if len(matches) > 1 { + return matches[1] + } + + return "" +} diff --git a/cli/bpmetadata/path_test.go b/cli/bpmetadata/path_test.go new file mode 100644 index 00000000000..7651c49dc75 --- /dev/null +++ b/cli/bpmetadata/path_test.go @@ -0,0 +1,158 @@ +package bpmetadata + +import ( + "path" + "regexp" + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + bptestdataPath = "../testdata/bpmetadata" +) + +func TestIsPathValid(t *testing.T) { + tests := []struct { + name string + path string + want bool + wantErr bool + }{ + { + name: "valid", + path: "assets/icon.png", + want: true, + wantErr: false, + }, + { + name: "invalid", + path: "assets/icon2.png", + wantErr: true, + }, + { + name: "empty", + path: "", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := fileExists(path.Join(bptestdataPath, tt.path)) + if (err != nil) != tt.wantErr { + t.Errorf("fileExists() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if got != tt.want { + t.Errorf("fileExists() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDirContent(t *testing.T) { + tests := []struct { + name string + path string + regex string + want []*BlueprintMiscContent + wantErr bool + }{ + { + name: "valid examples", + path: "content/examples", + 
regex: regexExamples, + want: []*BlueprintMiscContent{ + { + Name: "terraform", + Location: "examples/acm/acm-terraform-blog-part1/terraform", + }, + { + Name: "acm-terraform-blog-part2", + Location: "examples/acm/acm-terraform-blog-part2", + }, + { + Name: "simple_regional", + Location: "examples/simple_regional", + }, + { + Name: "simple_regional_beta", + Location: "examples/simple_regional_beta", + }, + }, + wantErr: false, + }, + { + name: "valid modules", + path: "content/modules", + regex: regexModules, + want: []*BlueprintMiscContent{ + { + Name: "beta-public-cluster", + Location: "modules/beta-public-cluster", + }, + { + Name: "binary-authorization", + Location: "modules/binary-authorization", + }, + { + Name: "private-cluster", + Location: "modules/private-cluster", + }, + }, + wantErr: false, + }, + { + name: "invalid dir", + path: "content/modules2", + regex: regexModules, + wantErr: true, + }, + { + name: "some example folders without any tf", + path: "content/examples-some-without-tf/examples", + regex: regexExamples, + want: []*BlueprintMiscContent{ + { + Name: "terraform", + Location: "examples/acm/acm-terraform-blog-part1/terraform", + }, + { + Name: "simple_regional", + Location: "examples/simple_regional", + }, + }, + wantErr: false, + }, + { + name: "all module folders without any tf", + path: "content/modules-no-tf/modules", + regex: regexModules, + want: []*BlueprintMiscContent{}, + wantErr: false, + }, + { + name: "mismatched regex", + path: "content/modules", + regex: "badRegex", + want: []*BlueprintMiscContent{}, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + re := regexp.MustCompile(tt.regex) + sort.SliceStable(tt.want, func(i, j int) bool { return tt.want[i].Name < tt.want[j].Name }) + got, err := getDirPaths(path.Join(bptestdataPath, tt.path), re) + if (err != nil) != tt.wantErr { + t.Errorf("getDirPaths() error = %v, wantErr %v", err, tt.wantErr) + return + } + + assert.Equal(t, got, 
tt.want) + }) + } +} diff --git a/cli/bpmetadata/proto/bpmetadata.proto b/cli/bpmetadata/proto/bpmetadata.proto new file mode 100644 index 00000000000..7ffeeb2924b --- /dev/null +++ b/cli/bpmetadata/proto/bpmetadata.proto @@ -0,0 +1,531 @@ +syntax = "proto3"; + +package google.cloud.config.bpmetadata; + +import "google/protobuf/struct.proto"; +import "bpmetadata_ui.proto"; + +// TODO: update copybara configuration for go to java package transformation +option go_package = "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata"; + +// BlueprintMetadata defines the overall structure for blueprint metadata. +// The cli command i.e. `cft blueprint metadata` attempts at auto-generating +// metadata if the blueprint is structured based on the TF blueprint template +// i.e. https://github.com/terraform-google-modules/terraform-google-module-template +// All fields within BlueprintMetadata and its children are denoted as: +// - Gen: auto-generated - +// - Gen: manually-authored +// - Gen: partial (contains nested messages that can include both auto-generated and manually authored) +message BlueprintMetadata { + // APIVersion is the apiVersion field of a metadata file + // Gen: auto-generated + string api_version = 1; // @gotags: json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" + + // Kind is the kind field of a metadata file + // Gen: auto-generated + string kind = 2; // @gotags: json:"kind,omitempty" yaml:"kind,omitempty" + + // ResourceTypeMeta is the metadata field of a metadata file + // Gen: partial + ResourceTypeMeta metadata = 3; // @gotags: json:"metadata,omitempty" yaml:"metadata,omitempty" + + // BlueprintMetadataSpec is the metadata specification for the blueprint + // Gen: partial + BlueprintMetadataSpec spec = 4; // @gotags: yaml:"spec" json:"spec" +} + +message ResourceTypeMeta { + // Name is the metadata.name field of a Resource + // Gen: auto-generated + string name = 1; // @gotags: json:"name,omitempty" yaml:"name,omitempty" + + // 
Labels is the metadata.labels field of a Resource + // Gen: manually-authored + map labels = 2; // @gotags: json:"labels,omitempty" yaml:"labels,omitempty" + + // Annotations is the metadata.annotations field of a Resource. + // Gen: auto-generated + map annotations = 3; // @gotags: json:"annotations,omitempty" yaml:"annotations,omitempty" +} + +// BlueprintMetadataSpec defines the spec portion of the blueprint metadata. +message BlueprintMetadataSpec { + // BlueprintInfo defines the basic information of the blueprint. + // Gen: partial + BlueprintInfo info = 1; // @gotags: json:"info,omitempty" yaml:"info,omitempty" + + // BlueprintContent defines the detail for blueprint related content such as + // related documentation, diagrams, examples etc. + // Gen: partial + BlueprintContent content = 2; // @gotags: json:"content,omitempty" yaml:"content,omitempty" + + // BlueprintInterface defines the input and output variables for the blueprint. + // Gen: partial + BlueprintInterface interfaces = 3; // @gotags: json:"interfaces,omitempty" yaml:"interfaces,omitempty" + + // BlueprintRequirements defines the roles required and the associated services + // that need to be enabled to provision blueprint resources. + // Gen: auto-generated + BlueprintRequirements requirements = 4; // @gotags: json:"requirements,omitempty" yaml:"requirements,omitempty" + + // BlueprintUI defines the user interface for the blueprint. + // Gen: partial + BlueprintUI ui = 5; // @gotags: json:"ui,omitempty" yaml:"ui,omitempty" +} + +// BlueprintInfo defines the basic information of the blueprint. +message BlueprintInfo { + // Title for the blueprint. + // Gen: auto-generated - First H1 text in readme.md. + string title = 1; // @gotags: json:"title" yaml:"title" + + // Blueprint source location and source type. + // Gen: auto-generated - user will be prompted if repo information can not + // be determined from the blueprint path. 
+ BlueprintRepoDetail source = 2; // @gotags: json:"source,omitempty" yaml:"source,omitempty" + + // Last released semantic version for the packaged blueprint. + // Gen: auto-generated - From the `module_name` attribute of + // the `provider_meta "google"` block. + // E.g. + // provider_meta "google" { + // module_name = "blueprints/terraform/terraform-google-log-analysis/v0.1.5" + // } + string version = 3; // @gotags: json:"version,omitempty" yaml:"version,omitempty" + + // Actuation tool e.g. Terraform and its required version. + // Gen: auto-generated + BlueprintActuationTool actuation_tool = 4; // @gotags: json:"actuationTool,omitempty" yaml:"actuationTool,omitempty" + + // Various types of descriptions associated with the blueprint. + // Gen: auto-generated + BlueprintDescription description = 5; // @gotags: json:"description,omitempty" yaml:"description,omitempty" + + // Path to an image representing the icon for the blueprint. + // Will be set as "assets/icon.png", if present. + // Gen: auto-generated + string icon = 6; // @gotags: json:"icon,omitempty" yaml:"icon,omitempty" + + // The time estimate for configuring and deploying the blueprint. + // Gen: auto-generated + BlueprintTimeEstimate deployment_duration = 7; // @gotags: json:"deploymentDuration,omitempty" yaml:"deploymentDuration,omitempty" + + // The cost estimate for the blueprint based on preconfigured variables. + // Gen: auto-generated + BlueprintCostEstimate cost_estimate = 8; // @gotags: json:"costEstimate,omitempty" yaml:"costEstimate,omitempty" + + // A list of GCP cloud products used in the blueprint. + // Gen: manually-authored + repeated BlueprintCloudProduct cloud_products = 9; // @gotags: json:"cloudProducts,omitempty" yaml:"cloudProducts,omitempty" + + // A configuration of fixed and dynamic GCP quotas that apply to the blueprint. 
+ // Gen: manually-authored + repeated BlueprintQuotaDetail quota_details = 10; // @gotags: json:"quotaDetails,omitempty" yaml:"quotaDetails,omitempty" + + // Details on the author producing the blueprint. + // Gen: manually-authored + BlueprintAuthor author = 11; // @gotags: json:"author,omitempty" yaml:"author,omitempty" + + // Details on software installed as part of the blueprint. + // Gen: manually-authored + repeated BlueprintSoftwareGroup software_groups = 12; // @gotags: json:"softwareGroups,omitempty" yaml:"softwareGroups,omitempty" + + // Support offered, if any for the blueprint. + // Gen: manually-authored + BlueprintSupport support_info = 13; // @gotags: json:"supportInfo,omitempty" yaml:"supportInfo,omitempty" + + // A list of GCP org policies to be checked for successful deployment. + // Gen: manually-authored + repeated BlueprintOrgPolicyCheck org_policy_checks = 14; // @gotags: json:"orgPolicyChecks,omitempty" yaml:"orgPolicyChecks,omitempty" + + // Specifies if the blueprint supports single or multiple deployments per GCP project. + // If set to true, the blueprint can not be deployed more than once in the same GCP project. + // Gen: manually-authored + bool single_deployment = 15; // @gotags: json:"singleDeployment,omitempty" yaml:"singleDeployment,omitempty" +} + +// BlueprintContent defines the detail for blueprint related content such as +// related documentation, diagrams, examples etc. +message BlueprintContent { + // Gen: auto-generated + BlueprintArchitecture architecture = 1; // @gotags: json:"architecture,omitempty" yaml:"architecture,omitempty" + + // Gen: manually-authored + repeated BlueprintDiagram diagrams = 2; // @gotags: json:"diagrams,omitempty" yaml:"diagrams,omitempty" + + // Gen: auto-generated - the list content following the "## Documentation" tag. E.g. 
+ // ## Documentation + // - [Hosting a Static Website](https://cloud.google.com/storage/docs/hosting-static-website) + repeated BlueprintListContent documentation = 3; // @gotags: json:"documentation,omitempty" yaml:"documentation,omitempty" + + // Gen: auto-generated - blueprints under the modules/ folder. + repeated BlueprintMiscContent sub_blueprints = 4; // @gotags: json:"subBlueprints,omitempty" yaml:"subBlueprints,omitempty" + + // Gen: auto-generated - examples under the examples/ folder. + repeated BlueprintMiscContent examples = 5; // @gotags: json:"examples,omitempty" yaml:"examples,omitempty" +} + +// BlueprintInterface defines the input and output variables for the blueprint. +message BlueprintInterface { + // Gen: auto-generated - all defined variables for the blueprint + repeated BlueprintVariable variables = 1; // @gotags: json:"variables,omitempty" yaml:"variables,omitempty" + + // Gen: manually-authored + repeated BlueprintVariableGroup variable_groups = 2; // @gotags: json:"variableGroups,omitempty" yaml:"variableGroups,omitempty" + + // Gen: auto-generated - all defined outputs for the blueprint + repeated BlueprintOutput outputs = 3; // @gotags: json:"outputs,omitempty" yaml:"outputs,omitempty" +} + +// BlueprintRequirements defines the roles required and the associated services +// that need to be enabled to provision blueprint resources. +message BlueprintRequirements { + // Gen: auto-generated - all roles required for the blueprint in test/setup/iam.tf + // as the "int_required_roles" local. E.g. + // locals { + // int_required_roles = [ + // "roles/compute.admin", + // ] + // } + repeated BlueprintRoles roles = 1; // @gotags: json:"roles,omitempty" yaml:"roles,omitempty" + + // Gen: auto-generated - all services required for the blueprint in test/setup/main.tf + // as "activate_apis" in the project module. + repeated string services = 2; // @gotags: json:"services,omitempty" yaml:"services,omitempty" + + // Required provider versions. 
+ // Gen: auto-generated from required providers block. + repeated ProviderVersion provider_versions = 3; // @gotags: json:"providerVersions,omitempty" yaml:"providerVersions,omitempty" +} + +// ProviderVersion defines the required version for a provider. +message ProviderVersion { + // Provider source of form [hostname]/namespace/name. + // Hostname is optional defaulting to Terraform registry. + // Gen: auto-generated from required providers block. + string source = 1; // @gotags: json:"source,omitempty" yaml:"source,omitempty" + // Version constraint string. + // Gen: auto-generated from required providers block. + string version = 2; // @gotags: json:"version,omitempty" yaml:"version,omitempty" +} + +// BlueprintUI is the top-level structure for holding UI specific metadata. +message BlueprintUI { + // The top-level input section that defines the list of variables and + // their sections on the deployment page. + // Gen: partial + BlueprintUIInput input = 1; // @gotags: json:"input,omitempty" yaml:"input,omitempty" + + // The top-level section for listing runtime (or blueprint output) information + // i.e. the console URL for the VM or a button to ssh into the VM etc based on. + // Gen: manually-authored + BlueprintUIOutput runtime = 2; // @gotags: json:"runtime,omitempty" yaml:"runtime,omitempty" +} + +message BlueprintRepoDetail { + // Gen: auto-generated - URL from the .git dir. + // Can be manually overridden with a custom URL if needed. + string repo = 1; // @gotags: json:"repo" yaml:"repo" + + // Gen: auto-generated - set as "git" for now until more + // types are supported. + string source_type = 2; // @gotags: json:"sourceType" yaml:"sourceType" + + // Gen: auto-generated - not set for root modules but + // set as the module name for submodules, if found. + string dir = 3; // @gotags: json:"dir,omitempty" yaml:"dir,omitempty" +} + +// BlueprintActuationTool defines the actuation tool used to provision the blueprint. 
+message BlueprintActuationTool { + // Gen: auto-generated - set as "Terraform" for now until + //more flavors are supported. + string flavor = 1; // @gotags: json:"flavor,omitempty" yaml:"flavor,omitempty" + + // Required version for the actuation tool. + // Gen: auto-generated - For Terraform this is the `required_version` + // set in `terraform` block. E.g. + // terraform { + // required_version = ">= 0.13" + // } + string version = 2; // @gotags: json:"version,omitempty" yaml:"version,omitempty" +} + +// All descriptions are set with the markdown content immediately +// after each type's heading declaration in readme.md. +message BlueprintDescription { + // Gen: auto-generated - Markdown after "### Tagline". + string tagline = 1; // @gotags: json:"tagline,omitempty" yaml:"tagline,omitempty" + + // Gen: auto-generated - Markdown after "### Detailed". + string detailed = 2; // @gotags: json:"detailed,omitempty" yaml:"detailed,omitempty" + + // Gen: auto-generated - Markdown after "### PreDeploy". + string pre_deploy = 3; // @gotags: json:"preDeploy,omitempty" yaml:"preDeploy,omitempty" + + // Gen: auto-generated - Markdown after "### Html". + string html = 4; // @gotags: json:"html,omitempty" yaml:"html,omitempty" + + // Gen: auto-generated - Markdown after "### EulaUrls". + repeated string eula_urls = 5; // @gotags: json:"eulaUrls,omitempty" yaml:"eulaUrls,omitempty" + + // Gen: auto-generated - Markdown after "### Architecture" + // Deprecated. Use BlueprintContent.Architecture instead. + repeated string architecture = 6; // @gotags: json:"architecture,omitempty" yaml:"architecture,omitempty" +} + +// A time estimate in secs required for configuring and deploying the blueprint. +message BlueprintTimeEstimate { + // Gen: auto-generated - Set using the content defined under "### DeploymentTime" E.g. 
+ // ### DeploymentTime + // - Configuration: X secs + // - Deployment: Y secs + int64 configuration_secs = 1; // @gotags: json:"configurationSecs,omitempty" yaml:"configurationSecs,omitempty" + int64 deployment_secs = 2; // @gotags: json:"deploymentSecs,omitempty" yaml:"deploymentSecs,omitempty" +} + +// The cost estimate for the blueprint based on pre-configured variables. +message BlueprintCostEstimate { + // Gen: auto-generated - Set using the content defined under "### Cost" as a link + // with a description E.g. + // ### Cost + // [$20.00](https://cloud.google.com/products/calculator?hl=en_US&_ga=2.1665458.-226505189.1675191136#id=02fb0c45-cc29-4567-8cc6-f72ac9024add) + string description = 1; // @gotags: json:"description" yaml:"description" + string url = 2; // @gotags: json:"url" yaml:"url" +} + +// GCP cloud product(s) used in the blueprint. +message BlueprintCloudProduct { + // A top-level (e.g. "Compute Engine") or secondary (e.g. "Binary Authorization") + // product used in the blueprint. + // Gen: manually-authored + string product_id = 1; // @gotags: json:"productId,omitempty" yaml:"productId,omitempty" + + // Url for the product. + // Gen: manually-authored + string page_url = 2; // @gotags: json:"pageUrl" yaml:"pageUrl" + + // A label string for the product, if it is not an integrated GCP product. + // E.g. "Data Studio" + // Gen: manually-authored + string label = 3; // @gotags: json:"label,omitempty" yaml:"label,omitempty" + + // Is the product's landing page external to the GCP console e.g. + // lookerstudio.google.com + // Gen: manually-authored + bool is_external = 4; // @gotags: json:"isExternal,omitempty" yaml:"isExternal,omitempty" +} + +// BlueprintOrgPolicyCheck defines GCP org policies to be checked +// for successful deployment +message BlueprintOrgPolicyCheck { + // Id for the policy e.g. 
"compute-vmExternalIpAccess" + // Gen: manually-authored + string policy_id = 1; // @gotags: json:"policyId" yaml:"policyId" + + // If not set, it is assumed any version of this org policy + // prevents successful deployment of this solution. + // Gen: manually-authored + repeated string required_values = 2; // @gotags: json:"requiredValues,omitempty" yaml:"requiredValues,omitempty" +} + +// QuotaResourceType defines the type of resource a quota is applied to. +enum QuotaResourceType { + QRT_UNDEFINED = 0; + QRT_RESOURCE_TYPE_GCE_INSTANCE = 1; + QRT_RESOURCE_TYPE_GCE_DISK = 2; +} + +// BlueprintQuotaDetail defines the quota details for a blueprint. +message BlueprintQuotaDetail { + // DynamicVariable, if provided, associates the provided input variable + // with the corresponding resource and quota type. In its absence, the quota + // detail is assumed to be fixed. + // Gen: manually-authored + string dynamic_variable = 1; // @gotags: json:"dynamicVariable,omitempty" yaml:"dynamicVariable,omitempty" + + // ResourceType is the type of resource the quota will be applied to i.e. + // GCE Instance or Disk etc. + // Gen: manually-authored + QuotaResourceType resource_type = 2; // @gotags: json:"resourceType" yaml:"resourceType" + + // QuotaType is a key/value pair of the actual quotas and their corresponding + // values. Valid keys for quota_type can be: + // MACHINE_TYPE, + // CPUs, + // DISK_TYPE OR + // SIZE_GB. + // Gen: manually-authored + map quota_type = 3; // @gotags: json:"quotaType" yaml:"quotaType" +} + +// BlueprintAuthor defines the author of a blueprint. +message BlueprintAuthor { + // Name of template author or organization. + // Gen: manually-authored + string title = 1; // @gotags: json:"title" yaml:"title" + + // Description of the author. + // Gen: manually-authored + string description = 2; // @gotags: json:"description,omitempty" yaml:"description,omitempty" + + // Link to the author's website. 
+ // Gen: manually-authored + string url = 3; // @gotags: json:"url,omitempty" yaml:"url,omitempty" +} + +// SoftwareGroupType is a string enum representing the different types of software groups. +enum SoftwareGroupType { + // UNSPECIFIED is the default value for SoftwareGroupType. + SG_UNSPECIFIED = 0; + // OS is a software group that represents an operating system. + SG_OS = 1; +} + +// A group of related software components for the blueprint. +message BlueprintSoftwareGroup { + // Pre-defined software types. + // Gen: manually-authored + SoftwareGroupType type = 1; // @gotags: json:"type,omitempty" yaml:"type,omitempty" + + // Software components belonging to this group. + // Gen: manually-authored + repeated BlueprintSoftware software = 2; // @gotags: json:"software,omitempty" yaml:"software,omitempty" +} + +// A description of a piece of a single software component +// installed by the blueprint. +message BlueprintSoftware { + // User-visible title. + // Gen: manually-authored + string title = 1; // @gotags: json:"title" yaml:"title" + + // Software version. + // Gen: manually-authored + string version = 2; // @gotags: json:"version,omitempty" yaml:"version,omitempty" + + // Link to development site or marketing page for this software. + // Gen: manually-authored + string url = 3; // @gotags: json:"url,omitempty" yaml:"url,omitempty" + + // Link to license page. + // Gen: manually-authored + string license_url = 4; // @gotags: json:"licenseUrl,omitempty" yaml:"licenseUrl,omitempty" +} + +// A description of a support option +message BlueprintSupport { + // Description of the support option. + // Gen: manually-authored + string description = 1; // @gotags: json:"description" yaml:"description" + + // Link to the page providing this support option. + // Gen: manually-authored + string url = 2; // @gotags: json:"url,omitempty" yaml:"url,omitempty" + + // The organization or group that provides the support option (e.g.: + // "Community", "Google"). 
+ // Gen: manually-authored + string entity = 3; // @gotags: json:"entity,omitempty" yaml:"entity,omitempty" + + // Whether to show the customer's support ID. + // Gen: manually-authored + bool show_support_id = 4; // @gotags: json:"showSupportId,omitempty" yaml:"showSupportId,omitempty" +} + +message BlueprintArchitecture { + // Gen: auto-generated - the URL & list content following the "## Architecture" tag e.g. + // ## Architecture + // ![Blueprint Architecture](assets/architecture.png) + // 1. Step no. 1 + // 2. Step no. 2 + // 3. Step no. 3 + string diagram_url = 1; // @gotags: json:"diagramUrl" yaml:"diagramUrl" + + // Gen: auto-generated - the list items following the "## Architecture" tag. + repeated string description = 2; // @gotags: json:"description" yaml:"description" +} + +message BlueprintMiscContent { + string name = 1; // @gotags: json:"name" yaml:"name" + string location = 2; // @gotags: json:"location,omitempty" yaml:"location,omitempty" +} + +message BlueprintDiagram { + string name = 1; // @gotags: json:"name" yaml:"name" + string alt_text = 2; // @gotags: json:"altText,omitempty" yaml:"altText,omitempty" + string description = 3; // @gotags: json:"description,omitempty" yaml:"description,omitempty" +} + +message BlueprintListContent { + string title = 1; // @gotags: json:"title" yaml:"title" + string url = 2; // @gotags: json:"url,omitempty" yaml:"url,omitempty" +} + +message BlueprintVariable { + string name = 1; // @gotags: json:"name,omitempty" yaml:"name,omitempty" + string description = 2; // @gotags: json:"description,omitempty" yaml:"description,omitempty" + string var_type = 3; // @gotags: json:"varType,omitempty" yaml:"varType,omitempty" + google.protobuf.Value default_value = 4; // @gotags: json:"defaultValue,omitempty" yaml:"defaultValue,omitempty" + bool required = 5; // @gotags: json:"required,omitempty" yaml:"required,omitempty" + // Incoming connections to this variable. 
+  // Connections are outputs from other blueprints that can be potentially
+  // connected to this variable.
+  // Gen: manually-authored.
+  repeated BlueprintConnection connections = 6; // @gotags: json:"connections,omitempty" yaml:"connections,omitempty"
+}
+
+// Defines an incoming connection from a blueprint.
+message BlueprintConnection {
+  // Source of the connection.
+  // Gen: manually-authored.
+  ConnectionSource source = 1; // @gotags: json:"source,omitempty" yaml:"source,omitempty"
+  // Connection specifications.
+  // Gen: manually-authored.
+  ConnectionSpec spec = 2; // @gotags: json:"spec,omitempty" yaml:"spec,omitempty"
+}
+
+// Defines the source of a connection.
+message ConnectionSource {
+  // Source of the connection. Defined using the same format as module source
+  // of form [hostname]/namespace/name/provider for registry references and
+  // unprefixed github.com URLs for github references.
+  // Gen: manually-authored.
+  string source = 1; // @gotags: json:"source,omitempty" yaml:"source,omitempty"
+  // Version constraint syntax using the same format as module version
+  // constraints.
+  // Gen: manually-authored.
+  string version = 2; // @gotags: json:"version,omitempty" yaml:"version,omitempty"
+}
+
+// Defines the specifications of a connection.
+message ConnectionSpec {
+  // Output expression identifying output being connected to variable.
+  // This can be the output name or more complex expression like attribute notation.
+  // Gen: manually-authored.
+  string output_expr = 1; // @gotags: json:"outputExpr,omitempty" yaml:"outputExpr,omitempty"
+  // Optional dot separated attribute notation to connect to a specific object field of the input variable.
+  // Gen: manually-authored.
+  optional string input_path = 2; // @gotags: json:"inputPath,omitempty" yaml:"inputPath,omitempty"
+}
+
+// BlueprintVariableGroup is manually entered.
+message BlueprintVariableGroup { + string name = 1; // @gotags: json:"name" yaml:"name" + string description = 2; // @gotags: json:"description,omitempty" yaml:"description,omitempty" + repeated string variables = 3; // @gotags: json:"variables,omitempty" yaml:"variables,omitempty" +} + +message BlueprintOutput { + string name = 1; // @gotags: json:"name" yaml:"name" + string description = 2; // @gotags: json:"description,omitempty" yaml:"description,omitempty" + // Serialized type representation of the output value. + // Gen: manually-authored but will be automated in the future. + optional google.protobuf.Value type = 3; // @gotags: json:"type,omitempty" yaml:"type,omitempty" + +} + +message BlueprintRoles { + string level = 1; // @gotags: json:"level" yaml:"level" + repeated string roles = 2; // @gotags: json:"roles" yaml:"roles" +} diff --git a/cli/bpmetadata/proto/bpmetadata_ui.proto b/cli/bpmetadata/proto/bpmetadata_ui.proto new file mode 100644 index 00000000000..7d1ac390f2f --- /dev/null +++ b/cli/bpmetadata/proto/bpmetadata_ui.proto @@ -0,0 +1,306 @@ +syntax = "proto3"; + +package google.cloud.config.bpmetadata; + +import "google/protobuf/struct.proto"; +import "bpmetadata_ui_ext.proto"; + +// TODO: update copybara configuration for go to java package transformation +option go_package = "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata"; + +// BlueprintUIInput is the structure for holding Input and Input Section (i.e. groups) specific metadata. +message BlueprintUIInput { + // variables is a map defining all inputs on the UI. + // Gen: partial + map variables = 1; // @gotags: json:"variables,omitempty" yaml:"variables,omitempty" + + // Sections is a generic structure for grouping inputs together. + // Gen: manually-authored + repeated DisplaySection sections = 2; // @gotags: json:"sections,omitempty" yaml:"sections,omitempty" + + // List of boolean groups that will be referenced by properties. 
+ // Gen: manually-authored + repeated BooleanGroup boolean_groups = 3; // @gotags: json:"booleanGroups,omitempty" yaml:"booleanGroups,omitempty" +} + +// Additional display specific metadata pertaining to a particular +// input variable. +message DisplayVariable { + reserved 19; + + // The variable name from the corresponding standard metadata file. + // Gen: auto-generated - the Terraform variable name + string name = 1; // @gotags: json:"name" yaml:"name" + + // Visible title for the variable on the UI. If not present, + // Name will be used for the Title. + // Gen: auto-generated - the Terraform variable converted to title case e.g. + // variable "bucket_admins" will convert to "Bucket Admins" as the title. + string title = 2; // @gotags: json:"title" yaml:"title" + + // A flag to hide or show the variable on the UI. + // Gen: manually-authored + bool invisible = 3; // @gotags: json:"invisible,omitempty" yaml:"invisible,omitempty" + + // Variable tooltip. + // Gen: manually-authored + string tooltip = 4; // @gotags: json:"tooltip,omitempty" yaml:"tooltip,omitempty" + + // Placeholder text (when there is no default). + // Gen: manually-authored + string placeholder = 5; // @gotags: json:"placeholder,omitempty" yaml:"placeholder,omitempty" + + // Regex based validation rules for the variable. + // Gen: manually-authored + string regex_validation = 6; // @gotags: json:"regexValidation,omitempty" yaml:"regexValidation,omitempty" + + // Minimum no. of inputs for the input variable. + // Gen: manually-authored + int32 min_items = 7; // @gotags: json:"minItems,omitempty" yaml:"minItems,omitempty" + + // Max no. of inputs for the input variable. + // Gen: manually-authored + int32 max_items = 8; // @gotags: json:"maxItems,omitempty" yaml:"maxItems,omitempty" + + // Minimum length for string values. + // Gen: manually-authored + int32 min_length = 9; // @gotags: json:"minLength,omitempty" yaml:"minLength,omitempty" + + // Max length for string values. 
+ // Gen: manually-authored + int32 max_length = 10; // @gotags: json:"maxLength,omitempty" yaml:"maxLength,omitempty" + + // Minimum value for numeric types. + // Gen: manually-authored + float min = 11; // @gotags: json:"min,omitempty" yaml:"min,omitempty" + + // Max value for numeric types. + // Gen: manually-authored + float max = 12; // @gotags: json:"max,omitempty" yaml:"max,omitempty" + + // The name of a section to which this variable belongs. + // variables belong to the root section if this field is + // not set. + // Gen: manually-authored + string section = 13; // @gotags: json:"section,omitempty" yaml:"section,omitempty" + + // UI extension associated with the input variable. + // E.g. for rendering a GCE machine type selector: + // + // xGoogleProperty: + // type: GCE_MACHINE_TYPE + // zoneProperty: myZone + // gceMachineType: + // minCpu: 2 + // minRamGb: + // Gen: manually-authored + GooglePropertyExtension x_google_property = 14; // @gotags: json:"xGoogleProperty,omitempty" yaml:"xGoogleProperty,omitempty" + + // Text describing the validation rules for the property. Typically shown + // after an invalid input. + // Optional. UTF-8 text. No markup. At most 128 characters. + // Gen: manually-authored + string validation = 15; // @gotags: json:"validation,omitempty" yaml:"validation,omitempty" + + // Property subtext, displayed below the title. + // Gen: manually-authored + string subtext = 16; // @gotags: json:"subtext,omitempty" yaml:"subtext,omitempty" + + // Labels for enum values. + // Values must be UTF-8 text with no markup, and at most 64 characters. + // Gen: manually-authored + repeated ValueLabel enum_value_labels = 17; // @gotags: json:"enumValueLabels,omitempty" yaml:"enumValueLabels,omitempty" + + // Indicates the "advanced" level of the input property. Level 0 (default) + // will always be shown. Level 1 corresponds to one expansion (user clicks + // "show advanced options" or "more options"). 
Higher levels correspond to + // further expansions, or they may be collapsed to level 1 by the UI + // implementation. + // Optional. + // Gen: manually-authored + int32 level = 18; // @gotags: json:"level,omitempty" yaml:"level,omitempty" + + // The name of a boolean group from Input.booleanGroups to which this + // property belongs. Only allowed for properties declared as type boolean in + // the schema. Properties in a boolean group must be adjacent in the + // properties list and must belong to the same section (if any). + // Optional. + // Gen: manually-authored + string boolean_group = 20; // @gotags: json:"booleanGroup,omitempty" yaml:"booleanGroup,omitempty" + + // Alternate default value. + // This allows authors to define an alternative value for pre identified usecases such as security. + // If specified, this value can be used instead of the default value in BlueprintVariable. + // Gen: manually-authored. + message AlternateDefault { + // Type of the alternate default. + enum AlternateType { + // Default + ALTERNATE_TYPE_UNSPECIFIED = 0; + // A more secure default. + ALTERNATE_TYPE_SECURITY = 1; + // A default specifically needed for Design center. + ALTERNATE_TYPE_DC = 2; + } + AlternateType type = 1; // @gotags: json:"type,omitempty" yaml:"type,omitempty" + // Value of the alternate default. + google.protobuf.Value value = 2; // @gotags: json:"value,omitempty" yaml:"value,omitempty" + } + repeated AlternateDefault alt_defaults = 21; // @gotags: json:"altDefaults,omitempty" yaml:"altDefaults,omitempty" + + repeated DisplayVariableToggle toggle_using_variables = 22; // @gotags: json:"toggleUsingVariables,omitempty" yaml:"toggleUsingVariables,omitempty" +} + +message DisplayVariableToggle { + // The name of the variable used to toggle the display of another variable. + string variable_name = 1; // @gotags: json:"variableName,omitempty" yaml:"variableName,omitempty" + + // The value of the variable used to toggle the display of another variable. 
+  // Fixed: yaml tag previously said "variableValue" (singular), mismatching the
+  // json tag "variableValues" and the field name; serialized YAML and JSON keys
+  // must agree for round-tripping metadata files.
+  repeated string variable_values = 2; // @gotags: json:"variableValues,omitempty" yaml:"variableValues,omitempty"
+
+  // The type of the variable used to toggle the display of another variable.
+  ToggleType type = 3; // @gotags: json:"type,omitempty" yaml:"type,omitempty"
+
+  enum ToggleType {
+    // Default
+    DISPLAY_VARIABLE_TOGGLE_TYPE_UNSPECIFIED = 0;
+    // Boolean
+    DISPLAY_VARIABLE_TOGGLE_TYPE_BOOLEAN = 1;
+    // String
+    DISPLAY_VARIABLE_TOGGLE_TYPE_STRING = 2;
+    // Integer
+    DISPLAY_VARIABLE_TOGGLE_TYPE_INTEGER = 3;
+  }
+}
+
+message ValueLabel {
+  string label = 1; // @gotags: json:"label,omitempty" yaml:"label,omitempty"
+  string value = 2; // @gotags: json:"value,omitempty" yaml:"value,omitempty"
+}
+
+// A logical group of variables. [Section][]s may also be grouped into
+// sub-sections.
+message DisplaySection {
+  // The name of the section, referenced by DisplayVariable.Section
+  // Section names must be unique.
+  // Gen: manually-authored
+  string name = 1; // @gotags: json:"name" yaml:"name"
+
+  // Section title.
+  // If not provided, name will be used instead.
+  // Gen: manually-authored
+  string title = 2; // @gotags: json:"title,omitempty" yaml:"title,omitempty"
+
+  // Section tooltip.
+  // Gen: manually-authored
+  string tooltip = 3; // @gotags: json:"tooltip,omitempty" yaml:"tooltip,omitempty"
+
+  // Section subtext.
+  // Gen: manually-authored
+  string subtext = 4; // @gotags: json:"subtext,omitempty" yaml:"subtext,omitempty"
+
+  // The name of the parent section (if parent is not the root section).
+  // Gen: manually-authored
+  string parent = 5; // @gotags: json:"parent,omitempty" yaml:"parent,omitempty"
+}
+
+// Groups a list of boolean properties into one logical property for the
+// purposes of the configuration form. The title of a [BooleanGroup][] has the
+// same styling as the title of an ordinary property, and individual properties
+// in the group will be packed more tightly together to indicate their
+// association.
Child of [Input][]. +message BooleanGroup { + // The name of the group, referenced by [Property][] + // .booleanGroup. + // BooleanGroup names must be unique. Required. + // Gen: manually-authored + string name = 1; // @gotags: json:"name" yaml:"name" + + // Group title. + // Required. UTF-8 text. No markup. At most 64 characters. + // Gen: manually-authored + string title = 2; // @gotags: json:"title" yaml:"title" + + // Group tooltip. + // Optional. HTML (<a href> tags only). At most 256 + // characters. + // Gen: manually-authored + string tooltip = 3; // @gotags: json:"tooltip,omitempty" yaml:"tooltip,omitempty" + + // Group subtext. + // Optional. HTML (<a href> tags only). At most 256 + // characters. + // Gen: manually-authored + string subtext = 4; // @gotags: json:"subtext,omitempty" yaml:"subtext,omitempty" +} + +message BlueprintUIOutput { + // Short message to be displayed while the blueprint is deploying. + // At most 128 characters. + // Gen: manually-authored + string output_message = 1; // @gotags: json:"outputMessage,omitempty" yaml:"outputMessage,omitempty" + + // List of suggested actions to take. + // Gen: manually-authored + repeated UIActionItem suggested_actions = 2; // @gotags: json:"suggestedActions,omitempty" yaml:"suggestedActions,omitempty" + + // outputs is a map defining a subset of Terraform outputs on the UI + // that may need additional UI configuration. + // Gen: manually-authored + map outputs = 3; // @gotags: json:"outputs,omitempty" yaml:"outputs,omitempty" +} + +// An item appearing in a list of required or suggested steps. +message UIActionItem { + // Summary heading for the item. + // Required. Accepts string expressions. At most 64 characters. + // Gen: manually-authored + string heading = 1; // @gotags: json:"heading" yaml:"heading" + + // Longer description of the item. + // At least one description or snippet is required. + // Accepts string expressions. HTML <a href> + // tags only. At most 512 characters. 
+  // Gen: manually-authored
+  string description = 2; // @gotags: json:"description,omitempty" yaml:"description,omitempty"
+
+  // Fixed-width formatted code snippet.
+  // At least one description or snippet is required.
+  // Accepts string expressions. UTF-8 text. At most 512 characters.
+  // Gen: manually-authored
+  string snippet = 3; // @gotags: json:"snippet,omitempty" yaml:"snippet,omitempty"
+
+  // If present, this expression determines whether the item is shown.
+  // Should be in the form of a Boolean expression e.g. outputs.hasExternalIP
+  // where `externalIP` is the output.
+  // Gen: manually-authored
+  string show_if = 4; // @gotags: json:"showIf,omitempty" yaml:"showIf,omitempty"
+}
+
+// Additional display specific metadata pertaining to a particular
+// Terraform output. Only applicable for Outputs that are URLs.
+message DisplayOutput {
+  // open_in_new_tab defines if the Output action should be opened
+  // in a new tab.
+  // Gen: manually-authored
+  bool open_in_new_tab = 1; // @gotags: json:"openInNewTab,omitempty" yaml:"openInNewTab,omitempty"
+
+  // show_in_notification defines if the Output should be shown in
+  // notification for the deployment.
+  // Gen: manually-authored
+  bool show_in_notification = 2; // @gotags: json:"showInNotification,omitempty" yaml:"showInNotification,omitempty"
+
+  // label to display on the Output action button
+  // Gen: manually-authored
+  // Fixed: yaml tag option was "omitEmpty"; yaml struct-tag options are
+  // lowercase, so the misspelled option was ignored and the field was always
+  // serialized. Corrected to "omitempty" to match the json tag behavior.
+  string label = 3; // @gotags: json:"label,omitempty" yaml:"label,omitempty"
+
+  // Visibility defines how the output is exposed.
+  // Gen: manually-authored.
+  enum Visibility {
+    // Default
+    VISIBILITY_UNSPECIFIED = 0;
+    // Expose output as root module output.
+    VISIBILITY_ROOT = 1;
+  }
+  // Fixed: yaml tag option was "omitEmpty"; yaml struct-tag options are
+  // lowercase, so the misspelled option was ignored and the field was always
+  // serialized. Corrected to "omitempty" to match the json tag behavior.
+  Visibility visibility = 4; // @gotags: json:"visibility,omitempty" yaml:"visibility,omitempty"
+}
diff --git a/cli/bpmetadata/proto/bpmetadata_ui_ext.proto b/cli/bpmetadata/proto/bpmetadata_ui_ext.proto
new file mode 100644
index 00000000000..57d6aefa02f
--- /dev/null
+++ b/cli/bpmetadata/proto/bpmetadata_ui_ext.proto
@@ -0,0 +1,223 @@
+syntax = "proto3";
+
+package google.cloud.config.bpmetadata;
+
+// TODO: update copybara configuration for go to java package transformation
+option go_package = "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata";
+
+// ExtensionType specifies the type of extension.
+enum ExtensionType {
+  // ET_UNDEFINED indicates that the extension type is undefined.
+  ET_UNDEFINED = 0;
+
+  // General formats.
+  ET_EMAIL_ADDRESS = 1;
+  ET_MULTI_LINE_STRING = 2;
+  ET_CREATE_RESOURCE = 21;
+
+  // GCE related.
+  ET_GCE_DISK_IMAGE = 3;
+  ET_GCE_DISK_TYPE = 4;
+  ET_GCE_DISK_SIZE = 5;
+  ET_GCE_MACHINE_TYPE = 6;
+  ET_GCE_NETWORK = 7;
+  ET_GCE_ZONE = 8;
+  ET_GCE_SUBNETWORK = 9;
+  ET_GCE_REGION = 10;
+  ET_GCE_GPU_TYPE = 11;
+  ET_GCE_GPU_COUNT = 12;
+  ET_GCE_EXTERNAL_IP = 13;
+  ET_GCE_IP_FORWARDING = 14;
+  ET_GCE_FIREWALL = 15;
+  ET_GCE_FIREWALL_RANGE = 16;
+  ET_GCE_GENERIC_RESOURCE = 17;
+  ET_GCE_LOCATION = 22;
+
+  // GCS related.
+  ET_GCS_BUCKET = 18;
+
+  // IAM related.
+  ET_IAM_SERVICE_ACCOUNT = 19;
+
+  // GKE related.
+  ET_GKE_CLUSTER = 20;
+}
+
+// An extension for variables defined as part of DisplayVariable. The
+// extension defines Google-specific metadata necessary for choosing an
+// appropriate input widget or adding restrictions to GCP-specific resources.
+message GooglePropertyExtension {
+  // Type specifies the type of extension.
+  // Gen: manually-authored
+  ExtensionType type = 1; // @gotags: json:"type" yaml:"type"
+
+  // Some properties (e.g. GCE_MACHINE_TYPE) require a zone context in order to
+  // determine the set of allowable values.
This field references another + // property from the schema, which must have type GCE_ZONE. + // Gen: manually-authored + string zone_property = 2; // @gotags: json:"zoneProperty,omitempty" yaml:"zoneProperty,omitempty" + + // Property-specific extensions. + // Gen: manually-authored (all property extensions and their child properties) + GCEMachineTypeExtension gce_machine_type = 3; // @gotags: json:"gceMachineType,omitempty" yaml:"gceMachineType,omitempty" + GCEDiskSizeExtension gce_disk_size = 4; // @gotags: json:"gceDiskSize,omitempty" yaml:"gceDiskSize,omitempty" + GCESubnetworkExtension gce_subnetwork = 5; // @gotags: json:"gceSubnetwork,omitempty" yaml:"gceSubnetwork,omitempty" + GCEGenericResourceExtension gce_resource = 6; // @gotags: json:"gceResource,omitempty" yaml:"gceResource,omitempty" + GCEGPUTypeExtension gce_gpu_type = 7; // @gotags: json:"gceGpuType,omitempty" yaml:"gceGpuType,omitempty" + GCEGPUCountExtension gce_gpu_count = 8; // @gotags: json:"gceGpuCount,omitempty" yaml:"gceGpuCount,omitempty" + GCENetworkExtension gce_network = 9; // @gotags: json:"gceNetwork,omitempty" yaml:"gceNetwork,omitempty" + GCEExternalIPExtension gce_external_ip = 10; // @gotags: json:"gceExternalIp,omitempty" yaml:"gceExternalIp,omitempty" + GCEIPForwardingExtension gce_ip_forwarding = 11; // @gotags: json:"gceIpForwarding,omitempty" yaml:"gceIpForwarding,omitempty" + GCEFirewallExtension gce_firewall = 12; // @gotags: json:"gceFirewall,omitempty" yaml:"gceFirewall,omitempty" + GCEFirewallRangeExtension gce_firewall_range = 13; // @gotags: json:"gceFirewallRange,omitempty" yaml:"gceFirewallRange,omitempty" + GCELocationExtension gce_zone = 14; // @gotags: json:"gceZone,omitempty" yaml:"gceZone,omitempty" + GCELocationExtension gce_region = 15; // @gotags: json:"gceRegion,omitempty" yaml:"gceRegion,omitempty" + IAMServiceAccountExtension iam_service_account = 16; // @gotags: json:"iamServiceAccount,omitempty" yaml:"iamServiceAccount,omitempty" + GCEDiskTypeExtension 
gce_disk_type = 17; // @gotags: json:"gceDiskType,omitempty" yaml:"gceDiskType,omitempty" + GCELocationExtension gce_location = 18; // @gotags: json:"gceLocation,omitempty" yaml:"gceLocation,omitempty" + GKEClusterExtension gke_cluster = 19; // @gotags: json:"gkeCluster,omitempty" yaml:"gkeCluster,omitempty" +} + +// GCELocationExtension specifies a location extension for a Google Compute Engine (GCE) resource. +message GCELocationExtension { + // AllowlistedZones is a list of zones that are allowed for the resource. + repeated string allowlisted_zones = 1; // @gotags: json:"allowlistedZones,omitempty" yaml:"allowlistedZones,omitempty" + + // AllowlistedRegions is a list of regions that are allowed for the resource. + repeated string allowlisted_regions = 2; // @gotags: json:"allowlistedRegions,omitempty" yaml:"allowlistedRegions,omitempty" +} + +// GCEMachineTypeExtension specifies a machine type extension for a GCE resource. +message GCEMachineTypeExtension { + // Minimum cpu. Used to filter the list of selectable machine types. + int32 min_cpu = 1; // @gotags: json:"minCpu,omitempty" yaml:"minCpu,omitempty" + + // Minimum ram. Used to filter the list of selectable machine types. + float min_ram_gb = 2; // @gotags: json:"minRamGb,omitempty" yaml:"minRamGb,omitempty" + + // If true, custom machine types will not be selectable. + // More info: + // https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type + bool disallow_custom_machine_types = 3; // @gotags: json:"disallowCustomMachineTypes,omitempty" yaml:"disallowCustomMachineTypes,omitempty" + + // Disk Image allows us to reference the image that is being used + // to help provide/gather data such as the image architecture. + string disk_image_property = 4; // @gotags: json:"diskImageProperty,omitempty" yaml:"diskImageProperty,omitempty" +} + +// GCEGPUTypeExtension specifies a GPU type extension for a GCE resource. 
+message GCEGPUTypeExtension { + // MachineType is the name of the machine type that the GPU is attached to. + string machine_type = 1; // @gotags: json:"machineType" yaml:"machineType" + + // GPUType is the type(s) of GPU that is attached to the machine. + repeated string gpu_type = 2; // @gotags: json:"gpuType,omitempty" yaml:"gpuType,omitempty" +} + +// GCEGPUCountExtension specifies the number of GPUs that should be attached to a machine. +message GCEGPUCountExtension { + // This field references another variable from the schema, + // which must have type GCEMachineType. + string machine_type_variable = 1; // @gotags: json:"machineTypeVariable" yaml:"machineTypeVariable" +} + +// GCEDiskTypeExtension specifies the type of disk for a GCE resource. +message GCEDiskTypeExtension { + // This field references another variable from the schema, + // which must have type GCEMachineType. + string machine_type_variable = 1; // @gotags: json:"machineTypeVariable" yaml:"machineTypeVariable" +} + +// GCEDiskSizeExtension specifies the size of a disk for a GCE resource. +message GCEDiskSizeExtension { + // The allowable range of disk sizes depends on the disk type. This field + // references another variable from the schema, which must have type GCEDiskType. + string disk_type_variable = 1; // @gotags: json:"diskTypeVariable" yaml:"diskTypeVariable" +} + +// GCENetworkExtension specifies a network extension for a GCE resource. +message GCENetworkExtension { + // AllowSharedVpcs indicates this solution can receive + // shared VPC selflinks (fully qualified compute links). + bool allow_shared_vpcs = 1; // @gotags: json:"allowSharedVpcs,omitempty" yaml:"allowSharedVpcs,omitempty" + + // Used to indicate to which machine type this network interface will be + // attached to. + string machine_type_variable = 2; // @gotags: json:"machineTypeVariable" yaml:"machineTypeVariable" + + // Label that will be in front of each Network Interface. 
+ repeated string labels = 3; // @gotags: json:"labels,omitempty" yaml:"labels,omitempty" +} + +// ExternalIPType specifies the type of external IP address. +enum ExternalIPType { + IP_UNSPECIFIED = 0; + // EPHEMERAL indicates that the external IP address is ephemeral. + IP_EPHEMERAL = 1; + // STATIC indicates that the external IP address is static. + IP_STATIC = 2; + // NONE indicates that an external IP is not assigned. + IP_NONE = 3; +} + +message GCEExternalIPExtension { + // NetworkVariable is the name of the network variable that the external IP address belongs to. + string network_variable = 1; // @gotags: json:"networkVariable" yaml:"networkVariable" + + // Type specifies the type of external IP address. Defaults to EPHEMERAL if not specified. + ExternalIPType type = 2; // @gotags: json:"type,omitempty" yaml:"type,omitempty" + + // Flag to denote if an external IP should be configurable. + bool not_configurable = 3; // @gotags: json:"notConfigurable,omitempty" yaml:"notConfigurable,omitempty" + + // Flag to denote if static IPs are allowed for the external IP. + bool allow_static_ips = 4; // @gotags: json:"allowStaticIps,omitempty" yaml:"allowStaticIps,omitempty" +} + +// GCEIPForwardingExtension specifies an IP forwarding extension for a GCE resource. +message GCEIPForwardingExtension { + // NetworkVariable is the name of the network variable that the IP forwarding belongs to. + string network_variable = 1; // @gotags: json:"networkVariable" yaml:"networkVariable" + + // NotConfigurable specifies whether the IP forwarding is configurable. Defaults to false if not specified. + bool not_configurable = 2; // @gotags: json:"notConfigurable,omitempty" yaml:"notConfigurable,omitempty" +} + +message GCEFirewallExtension { + // NetworkVariable is used to indicate the network variable in the schema + // this external IP belongs to. 
+ string network_variable = 1; // @gotags: json:"networkVariable" yaml:"networkVariable" +} + +message GCEFirewallRangeExtension { + // FirewallVariable is used to indicate the firewall variable with the type + // GCEFirewall in the schema to which this firewall range belongs to. + string firewall_variable = 1; // @gotags: json:"firewallVariable" yaml:"firewallVariable" +} + +message GCESubnetworkExtension { + // Subnetwork variable requires a network context in order to determine the + // set of available subnetworks. This field references another + // variable from the schema, which must have type GCENetwork. + string network_variable = 1; // @gotags: json:"networkVariable" yaml:"networkVariable" +} + +message GCEGenericResourceExtension { + // GCE resource type to be fetched. This field references another + // property from the schema, which must have type GCEGenericResource. + string resource_variable = 1; // @gotags: json:"resourceVariable" yaml:"resourceVariable" +} + +message IAMServiceAccountExtension { + // List of IAM roles that to grant to a new SA, or the roles to filter + // existing SAs with. + repeated string roles = 1; // @gotags: json:"roles" yaml:"roles" +} + +message GKEClusterExtension { + // GKE Cluster variable to be used for gathering context needed to select/create + // a GKE Cluster for GKE AI Products. + string location_variable = 1; // @gotags: json:"locationVariable" yaml:"locationVariable" + // Variable that will indicate if we are creating a cluster or using an existing one. 
+ string cluster_creation_variable = 2; // @gotags: json:"clusterCreationVariable" yaml:"clusterCreationVariable" +} diff --git a/cli/bpmetadata/repo.go b/cli/bpmetadata/repo.go new file mode 100644 index 00000000000..7c311abb58b --- /dev/null +++ b/cli/bpmetadata/repo.go @@ -0,0 +1,153 @@ +package bpmetadata + +import ( + "errors" + "os" + "strings" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/util" + "github.com/iancoleman/strcase" +) + +type repoDetail struct { + RepoName string + ModuleName string + Source *repoSource +} + +type repoSource struct { + URL string + BlueprintRootPath string + RepoRootPath string + SourceType string +} + +const ( + nestedBpPath = "/modules" +) + +// getRepoDetailsByPath takes a local path for a blueprint and tries +// to get repo details that include its name, path and type +func getRepoDetailsByPath(bpPath string, r *repoDetail, readme []byte) { + // For a submodule, we'll try to get repo details from the + // root blueprint or just return the current repoDetail object + // if it's still in memory. 
+ if strings.Contains(bpPath, nestedBpPath) && r.Source != nil { + // try to parse the module name from MD which will get + // overridden with "["repoName-submoduleName" if repoName is available + r.ModuleName = parseRepoNameFromMd(readme) + if r.RepoName != "" { + r.ModuleName = r.RepoName + "-" + getBpSubmoduleNameInKebabCase(bpPath) + } + + return + } + + s := "git" + bpRootPath := getBlueprintRootPath(bpPath) + currentRootRepoDetails := getRepoDetailsFromRootBp(bpRootPath) + bpPath = strings.TrimSuffix(bpPath, "/") + repoUrl, repoRoot, err := util.GetRepoUrlAndRootPath(bpPath) + if err != nil { + repoUrl = "" + s = "" + } + + if currentRootRepoDetails.Source.URL != "" { + repoUrl = currentRootRepoDetails.Source.URL + } + + n, err := util.GetRepoName(repoUrl) + if err != nil { + n = parseRepoNameFromMd(readme) + } + + *r = repoDetail{ + RepoName: n, + ModuleName: n, + Source: &repoSource{ + URL: repoUrl, + SourceType: s, + BlueprintRootPath: bpRootPath, + RepoRootPath: repoRoot, + }, + } +} + +// getRepoDetailsFromRootBp tries to parse repo details from the +// root blueprint metadata.yaml. 
This is specially useful when +// metadata is generated for a submodule that +func getRepoDetailsFromRootBp(bpPath string) repoDetail { + rootBp := getBlueprintRootPath(bpPath) + b, err := UnmarshalMetadata(rootBp, metadataFileName) + if errors.Is(err, os.ErrNotExist) { + return repoDetail{ + Source: &repoSource{ + BlueprintRootPath: rootBp, + }, + } + } + + if err != nil && strings.Contains(err.Error(), "proto:") { + return repoDetail{ + Source: &repoSource{ + BlueprintRootPath: rootBp, + }, + } + } + + // There is metadata for root but does not have source info + // which means this is a non-git hosted blueprint + if b.Spec.Info.Source == nil { + return repoDetail{ + RepoName: b.Metadata.Name, + Source: &repoSource{ + BlueprintRootPath: rootBp, + }, + } + } + + // If we get here, root metadata exists and has git info + return repoDetail{ + RepoName: b.Metadata.Name, + Source: &repoSource{ + URL: b.Spec.Info.Source.Repo, + SourceType: "git", + BlueprintRootPath: rootBp, + RepoRootPath: strings.Replace(rootBp, b.Spec.Info.Source.Dir, "", 1), + }, + } +} + +func parseRepoNameFromMd(readme []byte) string { + n := "" + title, err := getMdContent(readme, 1, 1, "", false) + if err == nil { + n = strcase.ToKebab(title.literal) + } + + return n +} + +// getBpRootPath determines if the provided bpPath is for a submodule +// and resolves it to the root module path if necessary +func getBlueprintRootPath(bpPath string) string { + if strings.Contains(bpPath, nestedBpPath) { + i := strings.Index(bpPath, nestedBpPath) + bpPath = bpPath[0:i] + } + + return bpPath +} + +// getBpSubmoduleNameInKebabCase gets the submodule name from the blueprint path +// if it lives under the /modules directory +func getBpSubmoduleNameInKebabCase(bpPath string) string { + i := strings.Index(bpPath, nestedBpPath) + if i == -1 { + return "" + } + + // 9 is the length for "/modules" after which the submodule name starts + return strcase.ToKebab(bpPath[i+9:]) +} diff --git 
a/cli/bpmetadata/repo_test.go b/cli/bpmetadata/repo_test.go new file mode 100644 index 00000000000..b4204a85bf6 --- /dev/null +++ b/cli/bpmetadata/repo_test.go @@ -0,0 +1,145 @@ +package bpmetadata + +import ( + "testing" +) + +func TestGetBpRootPath(t *testing.T) { + tests := []struct { + name string + path string + want string + wantErr bool + }{ + { + name: "simple", + path: "testdata/bpmetadata/terraform-google-bp01", + want: "testdata/bpmetadata/terraform-google-bp01", + wantErr: false, + }, + { + name: "one level nested", + path: "testdata/bpmetadata/terraform-google-bp01/modules/bp01-01", + want: "testdata/bpmetadata/terraform-google-bp01", + wantErr: false, + }, + { + name: "two level nested", + path: "testdata/bpmetadata/terraform-google-bp01/modules/bp01-01/subbp01-01", + want: "testdata/bpmetadata/terraform-google-bp01", + wantErr: false, + }, + { + name: "docker workspace root", + path: "workspace", + want: "workspace", + wantErr: false, + }, + { + name: "docker workspace submodule", + path: "workspace/modules/bp-01", + want: "workspace", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := getBlueprintRootPath(tt.path) + if got != tt.want { + t.Errorf("getBlueprintRootPath() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetBpSubmoduleName(t *testing.T) { + tests := []struct { + name string + path string + want string + }{ + { + name: "simple - no submodules", + path: "testdata/bpmetadata/terraform-google-bp01", + want: "", + }, + { + name: "simple - valid submodule", + path: "testdata/bpmetadata/terraform-google-bp01/modules/submodule-01", + want: "submodule-01", + }, + { + name: "simple - invalid submodule", + path: "testdata/bpmetadata/terraform-google-bp01/foo/submodule-01", + want: "", + }, + { + name: "simple - submodule with underscores", + path: "testdata/bpmetadata/terraform-google-bp01/modules/submodule_01", + want: "submodule-01", + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + got := getBpSubmoduleNameInKebabCase(tt.path) + if got != tt.want { + t.Errorf("getBpSubmoduleNameInKebabCase() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetRepoDetailsFromRootBp(t *testing.T) { + tests := []struct { + name string + path string + wantRepoDetails repoDetail + }{ + { + name: "root metadata does not exist", + path: "../testdata/bpmetadata/content/examples/simple_regional_beta/modules/submodule-01", + wantRepoDetails: repoDetail{ + Source: &repoSource{ + BlueprintRootPath: "../testdata/bpmetadata/content/examples/simple_regional_beta", + }, + }, + }, + { + name: "root metadata exists but does not have source info", + path: "../testdata/bpmetadata/content/examples/acm/modules/submodule-01", + wantRepoDetails: repoDetail{ + RepoName: "terraform-google-acm", + Source: &repoSource{ + BlueprintRootPath: "../testdata/bpmetadata/content/examples/acm", + }, + }, + }, + { + name: "root metadata exists and has source info", + path: "../testdata/bpmetadata/content/examples/simple_regional/modules/submodule-01", + wantRepoDetails: repoDetail{ + RepoName: "simple-regional", + Source: &repoSource{ + URL: "https://github.com/GoogleCloudPlatform/simple-regional", + SourceType: "git", + BlueprintRootPath: "../testdata/bpmetadata/content/examples/simple_regional", + RepoRootPath: "../testdata/bpmetadata/content/examples/simple_regional", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := getRepoDetailsFromRootBp(tt.path) + if got.Source != nil && *got.Source != *tt.wantRepoDetails.Source { + t.Errorf("getRepoDetailsFromRootBp() - Source = %v, want %v", *got.Source, *tt.wantRepoDetails.Source) + } + + if got.RepoName != tt.wantRepoDetails.RepoName { + t.Errorf("getRepoDetailsFromRootBp() - RepoName = %v, want %v", got.RepoName, tt.wantRepoDetails.RepoName) + } + }) + } +} diff --git a/cli/bpmetadata/schema/gcp-blueprint-metadata.json 
b/cli/bpmetadata/schema/gcp-blueprint-metadata.json new file mode 100644 index 00000000000..ab39ab7f0ce --- /dev/null +++ b/cli/bpmetadata/schema/gcp-blueprint-metadata.json @@ -0,0 +1,1253 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata/blueprint-metadata", + "$ref": "#/$defs/BlueprintMetadata", + "$defs": { + "BlueprintActuationTool": { + "properties": { + "flavor": { + "type": "string" + }, + "version": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintArchitecture": { + "properties": { + "diagramUrl": { + "type": "string" + }, + "description": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "diagramUrl", + "description" + ] + }, + "BlueprintAuthor": { + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "url": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "title" + ] + }, + "BlueprintCloudProduct": { + "properties": { + "productId": { + "type": "string" + }, + "pageUrl": { + "type": "string" + }, + "label": { + "type": "string" + }, + "isExternal": { + "type": "boolean" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "pageUrl" + ] + }, + "BlueprintConnection": { + "properties": { + "source": { + "$ref": "#/$defs/ConnectionSource" + }, + "spec": { + "$ref": "#/$defs/ConnectionSpec" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintContent": { + "properties": { + "architecture": { + "$ref": "#/$defs/BlueprintArchitecture" + }, + "diagrams": { + "items": { + "$ref": "#/$defs/BlueprintDiagram" + }, + "type": "array" + }, + "documentation": { + "items": { + "$ref": "#/$defs/BlueprintListContent" + }, + "type": "array" + }, + "subBlueprints": { + "items": { + "$ref": 
"#/$defs/BlueprintMiscContent" + }, + "type": "array" + }, + "examples": { + "items": { + "$ref": "#/$defs/BlueprintMiscContent" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintCostEstimate": { + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "description", + "url" + ] + }, + "BlueprintDescription": { + "properties": { + "tagline": { + "type": "string" + }, + "detailed": { + "type": "string" + }, + "preDeploy": { + "type": "string" + }, + "html": { + "type": "string" + }, + "eulaUrls": { + "items": { + "type": "string" + }, + "type": "array" + }, + "architecture": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintDiagram": { + "properties": { + "name": { + "type": "string" + }, + "altText": { + "type": "string" + }, + "description": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name" + ] + }, + "BlueprintInfo": { + "properties": { + "title": { + "type": "string" + }, + "source": { + "$ref": "#/$defs/BlueprintRepoDetail" + }, + "version": { + "type": "string" + }, + "actuationTool": { + "$ref": "#/$defs/BlueprintActuationTool" + }, + "description": { + "$ref": "#/$defs/BlueprintDescription" + }, + "icon": { + "type": "string" + }, + "deploymentDuration": { + "$ref": "#/$defs/BlueprintTimeEstimate" + }, + "costEstimate": { + "$ref": "#/$defs/BlueprintCostEstimate" + }, + "cloudProducts": { + "items": { + "$ref": "#/$defs/BlueprintCloudProduct" + }, + "type": "array" + }, + "quotaDetails": { + "items": { + "$ref": "#/$defs/BlueprintQuotaDetail" + }, + "type": "array" + }, + "author": { + "$ref": "#/$defs/BlueprintAuthor" + }, + "softwareGroups": { + "items": { + "$ref": "#/$defs/BlueprintSoftwareGroup" + }, + "type": "array" + }, + "supportInfo": { + "$ref": 
"#/$defs/BlueprintSupport" + }, + "orgPolicyChecks": { + "items": { + "$ref": "#/$defs/BlueprintOrgPolicyCheck" + }, + "type": "array" + }, + "singleDeployment": { + "type": "boolean" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "title" + ] + }, + "BlueprintInterface": { + "properties": { + "variables": { + "items": { + "$ref": "#/$defs/BlueprintVariable" + }, + "type": "array" + }, + "variableGroups": { + "items": { + "$ref": "#/$defs/BlueprintVariableGroup" + }, + "type": "array" + }, + "outputs": { + "items": { + "$ref": "#/$defs/BlueprintOutput" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintListContent": { + "properties": { + "title": { + "type": "string" + }, + "url": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "title" + ] + }, + "BlueprintMetadata": { + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "metadata": { + "$ref": "#/$defs/ResourceTypeMeta" + }, + "spec": { + "$ref": "#/$defs/BlueprintMetadataSpec" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "spec" + ] + }, + "BlueprintMetadataSpec": { + "properties": { + "info": { + "$ref": "#/$defs/BlueprintInfo" + }, + "content": { + "$ref": "#/$defs/BlueprintContent" + }, + "interfaces": { + "$ref": "#/$defs/BlueprintInterface" + }, + "requirements": { + "$ref": "#/$defs/BlueprintRequirements" + }, + "ui": { + "$ref": "#/$defs/BlueprintUI" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintMiscContent": { + "properties": { + "name": { + "type": "string" + }, + "location": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name" + ] + }, + "BlueprintOrgPolicyCheck": { + "properties": { + "policyId": { + "type": "string" + }, + "requiredValues": { + "items": { + "type": "string" + }, + "type": "array" + } + }, 
+ "additionalProperties": false, + "type": "object", + "required": [ + "policyId" + ] + }, + "BlueprintOutput": { + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": true + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name" + ] + }, + "BlueprintQuotaDetail": { + "properties": { + "dynamicVariable": { + "type": "string" + }, + "resourceType": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + }, + "quotaType": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "resourceType", + "quotaType" + ] + }, + "BlueprintRepoDetail": { + "properties": { + "repo": { + "type": "string" + }, + "sourceType": { + "type": "string" + }, + "dir": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "repo", + "sourceType" + ] + }, + "BlueprintRequirements": { + "properties": { + "roles": { + "items": { + "$ref": "#/$defs/BlueprintRoles" + }, + "type": "array" + }, + "services": { + "items": { + "type": "string" + }, + "type": "array" + }, + "providerVersions": { + "items": { + "$ref": "#/$defs/ProviderVersion" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintRoles": { + "properties": { + "level": { + "type": "string" + }, + "roles": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "level", + "roles" + ] + }, + "BlueprintSoftware": { + "properties": { + "title": { + "type": "string" + }, + "version": { + "type": "string" + }, + "url": { + "type": "string" + }, + "licenseUrl": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "title" + ] + }, + "BlueprintSoftwareGroup": { + "properties": { + "type": { + "oneOf": [ + { + "type": "string" + }, + 
{ + "type": "integer" + } + ] + }, + "software": { + "items": { + "$ref": "#/$defs/BlueprintSoftware" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintSupport": { + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string" + }, + "entity": { + "type": "string" + }, + "showSupportId": { + "type": "boolean" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "description" + ] + }, + "BlueprintTimeEstimate": { + "properties": { + "configurationSecs": { + "type": "integer" + }, + "deploymentSecs": { + "type": "integer" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintUI": { + "properties": { + "input": { + "$ref": "#/$defs/BlueprintUIInput" + }, + "runtime": { + "$ref": "#/$defs/BlueprintUIOutput" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintUIInput": { + "properties": { + "variables": { + "additionalProperties": { + "$ref": "#/$defs/DisplayVariable" + }, + "type": "object" + }, + "sections": { + "items": { + "$ref": "#/$defs/DisplaySection" + }, + "type": "array" + }, + "booleanGroups": { + "items": { + "$ref": "#/$defs/BooleanGroup" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintUIOutput": { + "properties": { + "outputMessage": { + "type": "string" + }, + "suggestedActions": { + "items": { + "$ref": "#/$defs/UIActionItem" + }, + "type": "array" + }, + "outputs": { + "additionalProperties": { + "$ref": "#/$defs/DisplayOutput" + }, + "type": "object" + } + }, + "additionalProperties": false, + "type": "object" + }, + "BlueprintVariable": { + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "varType": { + "type": "string" + }, + "defaultValue": true, + "required": { + "type": "boolean" + }, + "connections": { + "items": { + "$ref": "#/$defs/BlueprintConnection" + }, + "type": "array" + } + }, + 
"additionalProperties": false, + "type": "object" + }, + "BlueprintVariableGroup": { + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "variables": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name" + ] + }, + "BooleanGroup": { + "properties": { + "name": { + "type": "string" + }, + "title": { + "type": "string" + }, + "tooltip": { + "type": "string" + }, + "subtext": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name", + "title" + ] + }, + "ConnectionSource": { + "properties": { + "source": { + "type": "string" + }, + "version": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "ConnectionSpec": { + "properties": { + "outputExpr": { + "type": "string" + }, + "inputPath": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "DisplayOutput": { + "properties": { + "openInNewTab": { + "type": "boolean" + }, + "showInNotification": { + "type": "boolean" + }, + "label": { + "type": "string" + }, + "visibility": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + } + }, + "additionalProperties": false, + "type": "object" + }, + "DisplaySection": { + "properties": { + "name": { + "type": "string" + }, + "title": { + "type": "string" + }, + "tooltip": { + "type": "string" + }, + "subtext": { + "type": "string" + }, + "parent": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name" + ] + }, + "DisplayVariable": { + "properties": { + "name": { + "type": "string" + }, + "title": { + "type": "string" + }, + "invisible": { + "type": "boolean" + }, + "tooltip": { + "type": "string" + }, + "placeholder": { + "type": "string" + }, + "regexValidation": { + "type": "string" + }, + "minItems": { + "type": "integer" + }, + "maxItems": { + 
"type": "integer" + }, + "minLength": { + "type": "integer" + }, + "maxLength": { + "type": "integer" + }, + "min": { + "type": "number" + }, + "max": { + "type": "number" + }, + "section": { + "type": "string" + }, + "xGoogleProperty": { + "$ref": "#/$defs/GooglePropertyExtension" + }, + "validation": { + "type": "string" + }, + "subtext": { + "type": "string" + }, + "enumValueLabels": { + "items": { + "$ref": "#/$defs/ValueLabel" + }, + "type": "array" + }, + "level": { + "type": "integer" + }, + "booleanGroup": { + "type": "string" + }, + "altDefaults": { + "items": { + "$ref": "#/$defs/DisplayVariable_AlternateDefault" + }, + "type": "array" + }, + "toggleUsingVariables": { + "items": { + "$ref": "#/$defs/DisplayVariableToggle" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name", + "title" + ] + }, + "DisplayVariableToggle": { + "properties": { + "variableName": { + "type": "string" + }, + "variableValues": { + "items": { + "type": "string" + }, + "type": "array" + }, + "type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + } + }, + "additionalProperties": false, + "type": "object" + }, + "DisplayVariable_AlternateDefault": { + "properties": { + "type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + }, + "value": true + }, + "additionalProperties": false, + "type": "object" + }, + "GCEDiskSizeExtension": { + "properties": { + "diskTypeVariable": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "diskTypeVariable" + ] + }, + "GCEDiskTypeExtension": { + "properties": { + "machineTypeVariable": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "machineTypeVariable" + ] + }, + "GCEExternalIPExtension": { + "properties": { + "networkVariable": { + "type": "string" + }, + "type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + 
] + }, + "notConfigurable": { + "type": "boolean" + }, + "allowStaticIps": { + "type": "boolean" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "networkVariable" + ] + }, + "GCEFirewallExtension": { + "properties": { + "networkVariable": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "networkVariable" + ] + }, + "GCEFirewallRangeExtension": { + "properties": { + "firewallVariable": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "firewallVariable" + ] + }, + "GCEGPUCountExtension": { + "properties": { + "machineTypeVariable": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "machineTypeVariable" + ] + }, + "GCEGPUTypeExtension": { + "properties": { + "machineType": { + "type": "string" + }, + "gpuType": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "machineType" + ] + }, + "GCEGenericResourceExtension": { + "properties": { + "resourceVariable": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "resourceVariable" + ] + }, + "GCEIPForwardingExtension": { + "properties": { + "networkVariable": { + "type": "string" + }, + "notConfigurable": { + "type": "boolean" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "networkVariable" + ] + }, + "GCELocationExtension": { + "properties": { + "allowlistedZones": { + "items": { + "type": "string" + }, + "type": "array" + }, + "allowlistedRegions": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object" + }, + "GCEMachineTypeExtension": { + "properties": { + "minCpu": { + "type": "integer" + }, + "minRamGb": { + "type": "number" + }, + "disallowCustomMachineTypes": { + "type": "boolean" + }, + 
"diskImageProperty": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "GCENetworkExtension": { + "properties": { + "allowSharedVpcs": { + "type": "boolean" + }, + "machineTypeVariable": { + "type": "string" + }, + "labels": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "machineTypeVariable" + ] + }, + "GCESubnetworkExtension": { + "properties": { + "networkVariable": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "networkVariable" + ] + }, + "GKEClusterExtension": { + "properties": { + "locationVariable": { + "type": "string" + }, + "clusterCreationVariable": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "locationVariable", + "clusterCreationVariable" + ] + }, + "GooglePropertyExtension": { + "properties": { + "type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + }, + "zoneProperty": { + "type": "string" + }, + "gceMachineType": { + "$ref": "#/$defs/GCEMachineTypeExtension" + }, + "gceDiskSize": { + "$ref": "#/$defs/GCEDiskSizeExtension" + }, + "gceSubnetwork": { + "$ref": "#/$defs/GCESubnetworkExtension" + }, + "gceResource": { + "$ref": "#/$defs/GCEGenericResourceExtension" + }, + "gceGpuType": { + "$ref": "#/$defs/GCEGPUTypeExtension" + }, + "gceGpuCount": { + "$ref": "#/$defs/GCEGPUCountExtension" + }, + "gceNetwork": { + "$ref": "#/$defs/GCENetworkExtension" + }, + "gceExternalIp": { + "$ref": "#/$defs/GCEExternalIPExtension" + }, + "gceIpForwarding": { + "$ref": "#/$defs/GCEIPForwardingExtension" + }, + "gceFirewall": { + "$ref": "#/$defs/GCEFirewallExtension" + }, + "gceFirewallRange": { + "$ref": "#/$defs/GCEFirewallRangeExtension" + }, + "gceZone": { + "$ref": "#/$defs/GCELocationExtension" + }, + "gceRegion": { + "$ref": "#/$defs/GCELocationExtension" + }, + "iamServiceAccount": { + 
"$ref": "#/$defs/IAMServiceAccountExtension" + }, + "gceDiskType": { + "$ref": "#/$defs/GCEDiskTypeExtension" + }, + "gceLocation": { + "$ref": "#/$defs/GCELocationExtension" + }, + "gkeCluster": { + "$ref": "#/$defs/GKEClusterExtension" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "type" + ] + }, + "IAMServiceAccountExtension": { + "properties": { + "roles": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "roles" + ] + }, + "ProviderVersion": { + "properties": { + "source": { + "type": "string" + }, + "version": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "ResourceTypeMeta": { + "properties": { + "name": { + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "additionalProperties": false, + "type": "object" + }, + "UIActionItem": { + "properties": { + "heading": { + "type": "string" + }, + "description": { + "type": "string" + }, + "snippet": { + "type": "string" + }, + "showIf": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "heading" + ] + }, + "Value": { + "properties": { + "Kind": true + }, + "additionalProperties": false, + "type": "object", + "required": [ + "Kind" + ] + }, + "ValueLabel": { + "properties": { + "label": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + } + } +} diff --git a/cli/bpmetadata/schema/generate.go b/cli/bpmetadata/schema/generate.go new file mode 100644 index 00000000000..9dadd2c6a28 --- /dev/null +++ b/cli/bpmetadata/schema/generate.go @@ -0,0 +1,71 @@ +package main + +import ( + "encoding/json" + "os" + "path" + + 
"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata" + "github.com/invopop/jsonschema" +) + +const schemaFileName = "gcp-blueprint-metadata.json" + +// generateSchema creates a JSON Schema based on the types +// defined in the type BlueprintMetadata and it's recursive +// children. The generated schema will be used to validate +// all metadata files for consistency and will be uploaded +// to https://www.schemastore.org/ to provide IntelliSense +// VSCode for authors manually authoring the metadata. +func generateSchemaFile(o, wdPath string) error { + sData, err := GenerateSchema() + if err != nil { + return err + } + sData = append(sData, []byte("\n")...) + + // check if the provided output path is relative + if !path.IsAbs(o) { + o = path.Join(wdPath, o) + } + + err = os.WriteFile(path.Join(o, schemaFileName), sData, 0644) + if err != nil { + return err + } + + Log.Info("generated JSON schema for BlueprintMetadata", "path", path.Join(o, schemaFileName)) + return nil +} + +func GenerateSchema() ([]byte, error) { + r := &jsonschema.Reflector{} + s := r.Reflect(&bpmetadata.BlueprintMetadata{}) + s.Version = "http://json-schema.org/draft-07/schema#" + + // defaultValue was defined as interface{} and has changed to + // Value type with proto definitions. To keep backwards + // compatibility for schema validation, this is being set to + // true i.e. it's presence is validated regardless of type. + vDef, defExists := s.Definitions["BlueprintVariable"] + if defExists { + vDef.Properties.Set("defaultValue", jsonschema.TrueSchema) + } + // JSON schema seems to infer google.protobuf.Value as object type + // so we use the same workaround as above. 
+ oDef, defExists := s.Definitions["BlueprintOutput"] + if defExists { + oDef.Properties.Set("type", jsonschema.TrueSchema) + } + altDefaultDef, defExists := s.Definitions["DisplayVariable_AlternateDefault"] + if defExists { + altDefaultDef.Properties.Set("value", jsonschema.TrueSchema) + } + + sData, err := json.MarshalIndent(s, "", " ") + if err != nil { + return nil, err + } + + return sData, nil +} diff --git a/cli/bpmetadata/schema/main.go b/cli/bpmetadata/schema/main.go new file mode 100644 index 00000000000..ddc6a87f7a0 --- /dev/null +++ b/cli/bpmetadata/schema/main.go @@ -0,0 +1,44 @@ +package main + +import ( + "flag" + "fmt" + "os" + "path" + + log "github.com/inconshreveable/log15" +) + +// bpmetadata log15 handler +var Log = log.New() + +func main() { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, + "USAGE: %s [-output=PATH]\n", + path.Base(os.Args[0])) + flag.PrintDefaults() + os.Exit(1) + } + + output := flag.String("output", "", "output path for generating the JSON schema definition") + flag.Parse() + + os.Exit(process(*output)) +} + +func process(output string) int { + // get the working directory for the command + wdPath, err := os.Getwd() + if err != nil { + Log.Error("error getting working dir", "err", err) + return 1 + } + + if err := generateSchemaFile(output, wdPath); err != nil { + Log.Error("error generating schema", "err", err) + return 1 + } + + return 0 +} diff --git a/cli/bpmetadata/tfconfig.go b/cli/bpmetadata/tfconfig.go new file mode 100644 index 00000000000..caccbfe2fbd --- /dev/null +++ b/cli/bpmetadata/tfconfig.go @@ -0,0 +1,622 @@ +package bpmetadata + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata/parser" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/gruntwork-io/terratest/modules/terraform" + hcl "github.com/hashicorp/hcl/v2" + 
"github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + testingiface "github.com/mitchellh/go-testing-interface" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/structpb" +) + +const ( + versionRegEx = "/v([0-9]+[.0-9]*)$" +) + +type blueprintVersion struct { + moduleVersion string + requiredTfVersion string +} + +var rootSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + LabelNames: nil, + }, + { + Type: "locals", + LabelNames: nil, + }, + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "module", + LabelNames: []string{"name"}, + }, + }, +} + +var metaSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "provider_meta", + LabelNames: []string{"name"}, + }, + }, +} + +var variableSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "variable", + LabelNames: []string{"name"}, + }, + }, +} + +var metaBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "module_name", + }, + }, +} + +var moduleSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "activate_apis", + }, + }, +} + +// Create alias for generateTFStateFile so we can mock it in unit test. +var tfState = generateTFState + +// getBlueprintVersion gets both the required core version and the +// version of the blueprint +func getBlueprintVersion(configPath string) (*blueprintVersion, error) { + bytes, err := os.ReadFile(configPath) + if err != nil { + return nil, err + } + + //create hcl file object from the provided tf config + fileName := filepath.Base(configPath) + var diags hcl.Diagnostics + p := hclparse.NewParser() + versionsFile, fileDiags := p.ParseHCL(bytes, fileName) + diags = append(diags, fileDiags...) 
+ err = hasHclErrors(diags) + if err != nil { + return nil, err + } + + //parse out the blueprint version from the config + modName, err := parseBlueprintVersion(versionsFile, diags) + if err != nil { + return nil, fmt.Errorf("error parsing blueprint version: %w", err) + } + + //parse out the required version from the config + var hclModule tfconfig.Module + hclModule.RequiredProviders = make(map[string]*tfconfig.ProviderRequirement) + hclModuleDiag := tfconfig.LoadModuleFromFile(versionsFile, &hclModule) + diags = append(diags, hclModuleDiag...) + err = hasHclErrors(diags) + if err != nil { + return nil, err + } + + requiredCore := "" + if len(hclModule.RequiredCore) != 0 { + //always looking for the first element since tf blueprints + //have one required core version + requiredCore = hclModule.RequiredCore[0] + } + + return &blueprintVersion{ + moduleVersion: modName, + requiredTfVersion: requiredCore, + }, nil +} + +// parseBlueprintVersion gets the blueprint version from the provided config +// from the provider_meta block +func parseBlueprintVersion(versionsFile *hcl.File, diags hcl.Diagnostics) (string, error) { + re := regexp.MustCompile(versionRegEx) + // PartialContent() returns TF content containing blocks and attributes + // based on the provided schema + rootContent, _, rootContentDiags := versionsFile.Body.PartialContent(rootSchema) + diags = append(diags, rootContentDiags...) + err := hasHclErrors(diags) + if err != nil { + return "", err + } + + // based on the content returned, iterate through blocks and look for + // the terraform block specfically + for _, rootBlock := range rootContent.Blocks { + if rootBlock.Type != "terraform" { + continue + } + + // do a PartialContent() call again but now for the provider_meta block + // within the terraform block + tfContent, _, tfContentDiags := rootBlock.Body.PartialContent(metaSchema) + diags = append(diags, tfContentDiags...) 
+ err := hasHclErrors(diags) + if err != nil { + return "", err + } + + for _, tfContentBlock := range tfContent.Blocks { + if tfContentBlock.Type != "provider_meta" { + continue + } + + // this PartialContent() call with get the module_name attribute + // that contains the version info + metaContent, _, metaContentDiags := tfContentBlock.Body.PartialContent(metaBlockSchema) + diags = append(diags, metaContentDiags...) + err := hasHclErrors(diags) + if err != nil { + return "", err + } + + versionAttr, defined := metaContent.Attributes["module_name"] + if !defined { + return "", fmt.Errorf("module_name not defined for provider_meta") + } + + // get the module name from the version attribute and extract the + // version name only + var modName string + diags := gohcl.DecodeExpression(versionAttr.Expr, nil, &modName) + err = hasHclErrors(diags) + if err != nil { + return "", err + } + + m := re.FindStringSubmatch(modName) + if len(m) > 0 { + return m[len(m)-1], nil + } + + return "", nil + } + + break + } + + return "", nil +} + +// parseBlueprintProviderVersions gets the blueprint provider_versions from the provided config +// from the required_providers block. 
+func parseBlueprintProviderVersions(versionsFile *hcl.File) ([]*ProviderVersion, error) { + var v []*ProviderVersion + // parse out the required providers from the config + var hclModule tfconfig.Module + hclModule.RequiredProviders = make(map[string]*tfconfig.ProviderRequirement) + diags := tfconfig.LoadModuleFromFile(versionsFile, &hclModule) + err := hasHclErrors(diags) + if err != nil { + return nil, err + } + + for _, providerData := range hclModule.RequiredProviders { + if providerData.Source == "" { + Log.Info("Not found source in provider settings\n") + continue + } + if len(providerData.VersionConstraints) == 0 { + Log.Info("Not found version in provider settings\n") + continue + } + v = append(v, &ProviderVersion{ + Source: providerData.Source, + Version: strings.Join(providerData.VersionConstraints, ", "), + }) + } + // Sort provider_versions + sort.SliceStable(v, func(i, j int) bool { return v[i].Source < v[j].Source }) + return v, nil +} + +// getBlueprintInterfaces gets the variables and outputs associated +// with the blueprint +func getBlueprintInterfaces(configPath string) (*BlueprintInterface, error) { + //load the configs from the dir path + mod, diags := tfconfig.LoadModule(configPath) + err := hasTfconfigErrors(diags) + if err != nil { + return nil, err + } + + var variables []*BlueprintVariable + for _, val := range mod.Variables { + v := getBlueprintVariable(val) + variables = append(variables, v) + } + + // Get the varible orders from tf file. + variableOrders, sortErr := getBlueprintVariableOrders(configPath) + if sortErr != nil { + Log.Info("Failed to get variables orders. 
Fallback to sort by variable names.", sortErr) + sort.SliceStable(variables, func(i, j int) bool { return variables[i].Name < variables[j].Name }) + } else { + Log.Info("Sort variables by the original input order.") + sort.SliceStable(variables, func(i, j int) bool { + return variableOrders[variables[i].Name] < variableOrders[variables[j].Name] + }) + } + + var outputs []*BlueprintOutput + for _, val := range mod.Outputs { + o := getBlueprintOutput(val) + outputs = append(outputs, o) + } + + // Sort outputs + sort.SliceStable(outputs, func(i, j int) bool { return outputs[i].Name < outputs[j].Name }) + + return &BlueprintInterface{ + Variables: variables, + Outputs: outputs, + }, nil +} + +func getBlueprintVariableOrders(configPath string) (map[string]int, error) { + p := hclparse.NewParser() + variableFile, hclDiags := p.ParseHCLFile(filepath.Join(configPath, "variables.tf")) + err := hasHclErrors(hclDiags) + if hclDiags.HasErrors() { + return nil, err + } + variableContent, _, hclDiags := variableFile.Body.PartialContent(variableSchema) + err = hasHclErrors(hclDiags) + if hclDiags.HasErrors() { + return nil, err + } + variableOrderKeys := make(map[string]int) + for i, block := range variableContent.Blocks { + // We only care about variable blocks. + if block.Type != "variable" { + continue + } + // We expect a single label which is the variable name. 
+ if len(block.Labels) != 1 || len(block.Labels[0]) == 0 { + return nil, fmt.Errorf("Vaiable block has no name.") + } + + variableOrderKeys[block.Labels[0]] = i + } + return variableOrderKeys, nil +} + +// build variable +func getBlueprintVariable(modVar *tfconfig.Variable) *BlueprintVariable { + v := &BlueprintVariable{ + Name: modVar.Name, + Description: modVar.Description, + Required: modVar.Required, + VarType: modVar.Type, + } + if modVar.Default == nil { + return v + } + + vl, err := structpb.NewValue(modVar.Default) + if err == nil { + v.DefaultValue = vl + } + + return v +} + +// build output +func getBlueprintOutput(modOut *tfconfig.Output) *BlueprintOutput { + return &BlueprintOutput{ + Name: modOut.Name, + Description: modOut.Description, + } +} + +// getBlueprintRequirements gets the services and roles associated +// with the blueprint +func getBlueprintRequirements(rolesConfigPath, servicesConfigPath, versionsConfigPath string) (*BlueprintRequirements, error) { + //parse blueprint roles + p := hclparse.NewParser() + rolesFile, diags := p.ParseHCLFile(rolesConfigPath) + err := hasHclErrors(diags) + if err != nil { + return nil, err + } + + r, err := parseBlueprintRoles(rolesFile) + if err != nil { + return nil, err + } + + //parse blueprint services + servicesFile, diags := p.ParseHCLFile(servicesConfigPath) + err = hasHclErrors(diags) + if err != nil { + return nil, err + } + + s, err := parseBlueprintServices(servicesFile) + if err != nil { + return nil, err + } + + versionCfgFileExists, _ := fileExists(versionsConfigPath) + + if !versionCfgFileExists { + return &BlueprintRequirements{ + Roles: r, + Services: s, + }, nil + } + + //parse blueprint provider versions + versionsFile, diags := p.ParseHCLFile(versionsConfigPath) + err = hasHclErrors(diags) + if err != nil { + return nil, err + } + + v, err := parseBlueprintProviderVersions(versionsFile) + if err != nil { + return nil, err + } + + return &BlueprintRequirements{ + Roles: r, + Services: s, + 
ProviderVersions: v, + }, nil + +} + +// parseBlueprintRoles gets the roles required for the blueprint to be provisioned +func parseBlueprintRoles(rolesFile *hcl.File) ([]*BlueprintRoles, error) { + var r []*BlueprintRoles + iamContent, _, diags := rolesFile.Body.PartialContent(rootSchema) + err := hasHclErrors(diags) + if err != nil { + return nil, err + } + + for _, block := range iamContent.Blocks { + if block.Type != "locals" { + continue + } + + iamAttrs, diags := block.Body.JustAttributes() + err := hasHclErrors(diags) + if err != nil { + return nil, err + } + + for k := range iamAttrs { + var iamRoles []string + attrValue, _ := iamAttrs[k].Expr.Value(nil) + if !attrValue.Type().IsTupleType() { + continue + } + + ie := attrValue.ElementIterator() + for ie.Next() { + _, v := ie.Element() + iamRoles = append(iamRoles, v.AsString()) + } + + containerRoles := &BlueprintRoles{ + // TODO: (b/248123274) no good way to associate granularity yet + Level: "Project", + Roles: iamRoles, + } + + r = append(r, containerRoles) + } + + // because we're only interested in the top-level locals block + break + } + + sortBlueprintRoles(r) + return r, nil +} + +// Sort blueprint roles. +func sortBlueprintRoles(r []*BlueprintRoles) { + sort.SliceStable(r, func(i, j int) bool { + // 1. Sort by Level + if r[i].Level != r[j].Level { + return r[i].Level < r[j].Level + } + + // 2. Sort by the len of roles + if len(r[i].Roles) != len(r[j].Roles) { + return len(r[i].Roles) < len(r[j].Roles) + } + + // 3. 
Sort by the first role (if available) + if len(r[i].Roles) > 0 && len(r[j].Roles) > 0 { + return r[i].Roles[0] < r[j].Roles[0] + } + + return false + }) +} + +// parseBlueprintServices gets the gcp api services required for the blueprint +// to be provisioned +func parseBlueprintServices(servicesFile *hcl.File) ([]string, error) { + var s []string + servicesContent, _, diags := servicesFile.Body.PartialContent(rootSchema) + err := hasHclErrors(diags) + if err != nil { + return nil, err + } + + for _, block := range servicesContent.Blocks { + if block.Type != "module" { + continue + } + + moduleContent, _, moduleContentDiags := block.Body.PartialContent(moduleSchema) + diags = append(diags, moduleContentDiags...) + err := hasHclErrors(diags) + if err != nil { + return nil, err + } + + apisAttr, defined := moduleContent.Attributes["activate_apis"] + if !defined { + return nil, fmt.Errorf("activate_apis not defined for project module") + } + + diags = gohcl.DecodeExpression(apisAttr.Expr, nil, &s) + err = hasHclErrors(diags) + if err != nil { + return nil, err + } + + // because we're only interested in the top-level modules block + break + } + + return s, nil +} + +func hasHclErrors(diags hcl.Diagnostics) error { + for _, diag := range diags { + if diag.Severity == hcl.DiagError { + return fmt.Errorf("hcl error: %s | detail: %s", diag.Summary, diag.Detail) + } + } + + return nil +} + +// this is almost a dup of hasHclErrors because the TF api has two +// different structs for diagnostics... +func hasTfconfigErrors(diags tfconfig.Diagnostics) error { + for _, diag := range diags { + if diag.Severity == tfconfig.DiagError { + return fmt.Errorf("hcl error: %s | detail: %s", diag.Summary, diag.Detail) + } + } + + return nil +} + +// MergeExistingConnections merges existing connections from an old BlueprintInterface into a new one, +// preserving manually authored connections. 
+func mergeExistingConnections(newInterfaces, existingInterfaces *BlueprintInterface) { + if existingInterfaces == nil { + return // Nothing to merge if existingInterfaces is nil + } + + for i, variable := range newInterfaces.Variables { + for _, existingVariable := range existingInterfaces.Variables { + if variable.Name == existingVariable.Name && existingVariable.Connections != nil { + newInterfaces.Variables[i].Connections = existingVariable.Connections + } + } + } +} + +// mergeExistingOutputTypes merges existing output types from an old BlueprintInterface into a new one, +// preserving manually authored types. +func mergeExistingOutputTypes(newInterfaces, existingInterfaces *BlueprintInterface) { + if existingInterfaces == nil { + return // Nothing to merge if existingInterfaces is nil + } + + existingOutputs := make(map[string]*BlueprintOutput) + for _, output := range existingInterfaces.Outputs { + existingOutputs[output.Name] = output + } + + for i, output := range newInterfaces.Outputs { + if output.Type != nil { + continue + } + if existingOutput, ok := existingOutputs[output.Name]; ok && existingOutput.Type != nil { + newInterfaces.Outputs[i].Type = existingOutput.Type + } + } +} + +// UpdateOutputTypes generates the terraform.tfstate file, extracts output types from it, +// and updates the output types in the provided BlueprintInterface. 
+func updateOutputTypes(bpPath string, bpInterfaces *BlueprintInterface) error { + // Generate the terraform.tfstate file + stateData, err := tfState(bpPath) + if err != nil { + return fmt.Errorf("error generating terraform.tfstate file: %w", err) + } + + // Parse the state file and extract output types + outputTypes, err := parser.ParseOutputTypesFromState(stateData) + if err != nil { + return fmt.Errorf("error parsing output types: %w", err) + } + + // Update the output types in the BlueprintInterface + for i, output := range bpInterfaces.Outputs { + if outputType, ok := outputTypes[output.Name]; ok { + bpInterfaces.Outputs[i].Type = outputType + } + } + return nil +} + +// generateTFState generates the terraform.tfstate by running terraform init and apply, and terraform show to capture the state. +func generateTFState(bpPath string) ([]byte, error) { + var stateData []byte + // Construct the path to the test/setup directory + tfDir := filepath.Join(bpPath) + + // testing.T checks verbose flag to determine its mode. 
Add this line as a flags initializer + // so the program doesn't panic + flag.Parse() + runtimeT := testingiface.RuntimeT{} + + root := tft.NewTFBlueprintTest( + &runtimeT, + tft.WithTFDir(tfDir), // Setup test at the blueprint path, + ) + + root.DefineVerify(func(assert *assert.Assertions) { + stateStr, err := terraform.ShowE(&runtimeT, root.GetTFOptions()) + if err != nil { + assert.FailNowf("Failed to generate terraform.tfstate", "Error calling `terraform show`: %v", err) + } + + stateData = []byte(stateStr) + }) + + root.Test() // This will run terraform init and apply, and then destroy + + return stateData, nil +} diff --git a/cli/bpmetadata/tfconfig_test.go b/cli/bpmetadata/tfconfig_test.go new file mode 100644 index 00000000000..3d6c2a6fdff --- /dev/null +++ b/cli/bpmetadata/tfconfig_test.go @@ -0,0 +1,707 @@ +package bpmetadata + +import ( + "fmt" + "os" + "path" + "slices" + "testing" + + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/structpb" +) + +const ( + tfTestdataPath = "../testdata/bpmetadata/tf" + metadataTestdataPath = "../testdata/bpmetadata/metadata" + interfaces = "sample-module" +) + +func TestTFInterfaces(t *testing.T) { + varTests := []struct { + name string + varName string + wantDescription string + wantVarType string + wantDefault interface{} + wantRequired bool + }{ + { + name: "just name and description", + varName: "project_id", + wantDescription: "The project ID to host the cluster in", + wantRequired: true, + }, + { + name: "with type and string default", + varName: "description", + wantDescription: "The description of the cluster", + wantVarType: "string", + wantDefault: "some description", + }, + { + name: "with required as false", + varName: "regional", + wantDescription: "Whether is a regional cluster", + wantVarType: "bool", + wantDefault: true, + }, + } + + outTests := []struct { + name string + outName string + 
wantDescription string + }{ + { + name: "just name and description", + outName: "cluster_id", + wantDescription: "Cluster ID", + }, + { + name: "more than just name and description", + outName: "endpoint", + wantDescription: "Cluster endpoint", + }, + } + + got, err := getBlueprintInterfaces(path.Join(tfTestdataPath, interfaces)) + require.NoError(t, err) + for _, tt := range varTests { + t.Run(tt.name, func(t *testing.T) { + i := slices.IndexFunc(got.Variables, func(v *BlueprintVariable) bool { return v.Name == tt.varName }) + if got.Variables[i].Name != tt.varName { + t.Errorf("getBlueprintInterfaces() - Variable.Name = %v, want %v", got.Variables[i].Name, tt.varName) + return + } + + if got.Variables[i].Description != tt.wantDescription { + t.Errorf("getBlueprintInterfaces() - Variable.Description = %v, want %v", got.Variables[i].Description, tt.wantDescription) + return + } + + if got.Variables[i].DefaultValue.AsInterface() != tt.wantDefault { + t.Errorf("getBlueprintInterfaces() - Variable.DefaultValue = %v, want %v", got.Variables[i].DefaultValue.AsInterface(), tt.wantDefault) + return + } + + if got.Variables[i].Required != tt.wantRequired { + t.Errorf("getBlueprintInterfaces() - Variable.Required = %v, want %v", got.Variables[i].Required, tt.wantRequired) + return + } + + if got.Variables[i].VarType != tt.wantVarType { + t.Errorf("getBlueprintInterfaces() - Variable.VarType = %v, want %v", got.Variables[i].VarType, tt.wantVarType) + return + } + }) + } + + for _, tt := range outTests { + t.Run(tt.name, func(t *testing.T) { + i := slices.IndexFunc(got.Outputs, func(o *BlueprintOutput) bool { return o.Name == tt.outName }) + if got.Outputs[i].Name != tt.outName { + t.Errorf("getBlueprintInterfaces() - Output.Name = %v, want %v", got.Outputs[i].Name, tt.outName) + return + } + + if got.Outputs[i].Description != tt.wantDescription { + t.Errorf("getBlueprintInterfaces() - Output.Description = %v, want %v", got.Outputs[i].Description, tt.wantDescription) + return 
+ } + }) + } +} + +func TestTFVersions(t *testing.T) { + tests := []struct { + name string + configName string + wantRequiredVersion string + wantModuleVersion string + }{ + { + name: "core version only", + configName: "versions-core.tf", + wantRequiredVersion: ">= 0.13.0", + }, + { + name: "module version only", + configName: "versions-module.tf", + wantModuleVersion: "23.1.0", + }, + { + name: "bad module version good core version", + configName: "versions-bad-module.tf", + wantRequiredVersion: ">= 0.13.0", + wantModuleVersion: "", + }, + { + name: "bad core version good module version", + configName: "versions-bad-core.tf", + wantRequiredVersion: "", + wantModuleVersion: "23.1.0", + }, + { + name: "all bad", + configName: "versions-bad-all.tf", + wantRequiredVersion: "", + wantModuleVersion: "", + }, + { + name: "both versions", + configName: "versions.tf", + wantRequiredVersion: ">= 0.13.0", + wantModuleVersion: "23.1.0", + }, + { + name: "both versions with beta", + configName: "versions-beta.tf", + wantRequiredVersion: ">= 0.13.0", + wantModuleVersion: "23.1.0", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, _ := getBlueprintVersion(path.Join(tfTestdataPath, tt.configName)) + + if got != nil { + if got.requiredTfVersion != tt.wantRequiredVersion { + t.Errorf("getBlueprintVersion() = %v, want %v", got.requiredTfVersion, tt.wantRequiredVersion) + return + } + + if got.moduleVersion != tt.wantModuleVersion { + t.Errorf("getBlueprintVersion() = %v, want %v", got.moduleVersion, tt.wantModuleVersion) + return + } + } else { + if tt.wantModuleVersion != "" && tt.wantRequiredVersion != "" { + t.Errorf("getBlueprintVersion() = returned nil when we want core: %v and bpVersion: %v", tt.wantRequiredVersion, tt.wantModuleVersion) + } + } + }) + } +} + +func TestTFServices(t *testing.T) { + tests := []struct { + name string + configName string + wantServices []string + }{ + { + name: "simple list of apis", + configName: "main.tf", + 
wantServices: []string{ + "cloudkms.googleapis.com", + "cloudresourcemanager.googleapis.com", + "container.googleapis.com", + "pubsub.googleapis.com", + "serviceusage.googleapis.com", + "storage-api.googleapis.com", + "anthos.googleapis.com", + "anthosconfigmanagement.googleapis.com", + "logging.googleapis.com", + "meshca.googleapis.com", + "meshtelemetry.googleapis.com", + "meshconfig.googleapis.com", + "cloudresourcemanager.googleapis.com", + "monitoring.googleapis.com", + "stackdriver.googleapis.com", + "cloudtrace.googleapis.com", + "meshca.googleapis.com", + "iamcredentials.googleapis.com", + "gkeconnect.googleapis.com", + "privateca.googleapis.com", + "gkehub.googleapis.com", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := hclparse.NewParser() + content, _ := p.ParseHCLFile(path.Join(tfTestdataPath, tt.configName)) + got, err := parseBlueprintServices(content) + require.NoError(t, err) + assert.Equal(t, got, tt.wantServices) + }) + } +} + +func TestTFRoles(t *testing.T) { + tests := []struct { + name string + configName string + wantRoles []*BlueprintRoles + }{ + { + name: "simple list of roles", + configName: "iam.tf", + wantRoles: []*BlueprintRoles{ + { + Level: "Project", + Roles: []string{ + "roles/cloudsql.admin", + "roles/compute.networkAdmin", + "roles/iam.serviceAccountAdmin", + "roles/resourcemanager.projectIamAdmin", + "roles/storage.admin", + "roles/workflows.admin", + "roles/cloudscheduler.admin", + "roles/iam.serviceAccountUser", + }, + }, + }, + }, + { + name: "simple list of roles in order for multiple level", + configName: "iam-multi-level.tf", + wantRoles: []*BlueprintRoles{ + { + Level: "Project", + Roles: []string{ + "roles/owner", + "roles/storage.admin", + }, + }, + { + Level: "Project", + Roles: []string{ + "roles/cloudsql.admin", + "roles/compute.networkAdmin", + "roles/iam.serviceAccountAdmin", + "roles/resourcemanager.projectIamAdmin", + "roles/storage.admin", + "roles/workflows.admin", + 
"roles/cloudscheduler.admin", + "roles/iam.serviceAccountUser", + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := hclparse.NewParser() + content, _ := p.ParseHCLFile(path.Join(tfTestdataPath, tt.configName)) + got, err := parseBlueprintRoles(content) + require.NoError(t, err) + assert.Equal(t, got, tt.wantRoles) + }) + } +} + +func TestSortBlueprintRoles(t *testing.T) { + tests := []struct { + name string + in []*BlueprintRoles + want []*BlueprintRoles + }{ + { + name: "sort by level", + in: []*BlueprintRoles{ + { + Level: "Project", + Roles: []string{ + "roles/cloudsql.admin", + }, + }, + { + Level: "Folder", + Roles: []string{ + "roles/storage.admin", + }, + }, + }, + want: []*BlueprintRoles{ + { + Level: "Folder", + Roles: []string{ + "roles/storage.admin", + }, + }, + { + Level: "Project", + Roles: []string{ + "roles/cloudsql.admin", + }, + }, + }, + }, + { + name: "sort by length of roles", + in: []*BlueprintRoles{ + { + Level: "Project", + Roles: []string{ + "roles/storage.admin", + }, + }, + { + Level: "Project", + Roles: []string{ + "roles/cloudsql.admin", + "roles/owner", + }, + }, + }, + want: []*BlueprintRoles{ + { + Level: "Project", + Roles: []string{ + "roles/storage.admin", + }, + }, + { + Level: "Project", + Roles: []string{ + "roles/cloudsql.admin", + "roles/owner", + }, + }, + }, + }, + { + name: "sort by first role", + in: []*BlueprintRoles{ + { + Level: "Project", + Roles: []string{ + "roles/storage.admin", + }, + }, + { + Level: "Project", + Roles: []string{ + "roles/cloudsql.admin", + }, + }, + }, + want: []*BlueprintRoles{ + { + Level: "Project", + Roles: []string{ + "roles/cloudsql.admin", + }, + }, + { + Level: "Project", + Roles: []string{ + "roles/storage.admin", + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sortBlueprintRoles(tt.in) + assert.Equal(t, tt.in, tt.want) + }) + } +} + +func TestTFProviderVersions(t *testing.T) { + tests := 
[]struct { + name string + configName string + wantProviderVersions []*ProviderVersion + }{ + { + name: "Simple list of provider versions", + configName: "versions-beta.tf", + wantProviderVersions: []*ProviderVersion{ + { + Source: "hashicorp/google", + Version: ">= 4.4.0, < 7", + }, + { + Source: "hashicorp/google-beta", + Version: ">= 4.4.0, < 7", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := hclparse.NewParser() + content, _ := p.ParseHCLFile(path.Join(tfTestdataPath, tt.configName)) + got, err := parseBlueprintProviderVersions(content) + require.NoError(t, err) + assert.Equal(t, got, tt.wantProviderVersions) + }) + } +} + +func TestMergeExistingConnections(t *testing.T) { + tests := []struct { + name string + newInterfacesFile string + existingInterfacesFile string + }{ + { + name: "No existing connections", + newInterfacesFile: "new_interfaces_no_connections_metadata.yaml", + existingInterfacesFile: "existing_interfaces_without_connections_metadata.yaml", + }, + { + name: "One existing connection is preserved", + newInterfacesFile: "new_interfaces_no_connections_metadata.yaml", + existingInterfacesFile: "existing_interfaces_with_one_connection_metadata.yaml", + }, + { + name: "Multiple existing connections are preserved", + newInterfacesFile: "new_interfaces_no_connections_metadata.yaml", + existingInterfacesFile: "existing_interfaces_with_some_connections_metadata.yaml", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Load new interfaces from file + newInterfaces, err := UnmarshalMetadata(metadataTestdataPath, tt.newInterfacesFile) + require.NoError(t, err) + + // Load existing interfaces from file + existingInterfaces, err := UnmarshalMetadata(metadataTestdataPath, tt.existingInterfacesFile) + require.NoError(t, err) + + // Perform the merge + mergeExistingConnections(newInterfaces.Spec.Interfaces, existingInterfaces.Spec.Interfaces) + + // Assert that the merged interfaces 
match the existing ones + assert.Equal(t, existingInterfaces.Spec.Interfaces, newInterfaces.Spec.Interfaces) + }) + } +} + +func TestMergeExistingOutputTypes(t *testing.T) { + tests := []struct { + name string + newInterfacesFile string + existingInterfacesFile string + expectedInterfacesFile string + }{ + { + name: "No existing types", + newInterfacesFile: "interfaces_without_output_types_metadata.yaml", + existingInterfacesFile: "interfaces_without_output_types_metadata.yaml", + expectedInterfacesFile: "interfaces_without_output_types_metadata.yaml", + }, + { + name: "One complex existing type is preserved", + newInterfacesFile: "interfaces_without_output_types_metadata.yaml", + existingInterfacesFile: "interfaces_with_partial_output_types_metadata.yaml", + expectedInterfacesFile: "interfaces_with_partial_output_types_metadata.yaml", + }, + { + name: "All existing types (both simple and complex) are preserved", + newInterfacesFile: "interfaces_without_output_types_metadata.yaml", + existingInterfacesFile: "interfaces_with_full_output_types_metadata.yaml", + expectedInterfacesFile: "interfaces_with_full_output_types_metadata.yaml", + }, + { + name: "Previous types are not overwriting newly generated types", + newInterfacesFile: "interfaces_with_new_output_types_metadata.yaml", + existingInterfacesFile: "interfaces_with_partial_output_types_metadata.yaml", + expectedInterfacesFile: "interfaces_with_new_output_types_metadata.yaml", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Load new interfaces from file + newInterfaces, err := UnmarshalMetadata(metadataTestdataPath, tt.newInterfacesFile) + require.NoError(t, err) + + // Load existing interfaces from file + existingInterfaces, err := UnmarshalMetadata(metadataTestdataPath, tt.existingInterfacesFile) + require.NoError(t, err) + + // Perform the merge + mergeExistingOutputTypes(newInterfaces.Spec.Interfaces, existingInterfaces.Spec.Interfaces) + + // Load expected interfaces from 
file + expectedInterfaces, err := UnmarshalMetadata(metadataTestdataPath, tt.expectedInterfacesFile) + require.NoError(t, err) + + // Assert that the merged interfaces match the expected outcome + assert.Equal(t, expectedInterfaces.Spec.Interfaces, newInterfaces.Spec.Interfaces) + }) + } +} + +func TestTFIncompleteProviderVersions(t *testing.T) { + tests := []struct { + name string + configName string + }{ + { + name: "Empty list of provider versions", + configName: "provider-versions-empty.tf", + }, + { + name: "Missing ProviderVersion field", + configName: "provider-versions-bad.tf", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := hclparse.NewParser() + content, _ := p.ParseHCLFile(path.Join(tfTestdataPath, tt.configName)) + got, err := parseBlueprintProviderVersions(content) + require.NoError(t, err) + assert.Nil(t, got) + }) + } +} + +func TestTFVariableSortOrder(t *testing.T) { + tests := []struct { + name string + configPath string + expectOrders map[string]int + expectError bool + }{ + { + name: "Variable order should match tf input", + configPath: "sample-module", + expectOrders: map[string]int{ + "description": 1, + "project_id": 0, + "regional": 2, + }, + expectError: false, + }, + { + name: "Empty variable name should create nil order", + configPath: "empty-module", + expectOrders: map[string]int{}, + expectError: true, + }, + { + name: "No variable name should create nil order", + configPath: "invalid-module", + expectOrders: map[string]int{}, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := getBlueprintVariableOrders(path.Join(tfTestdataPath, tt.configPath)) + if tt.expectError { + assert.Error(t, err) + assert.Nil(t, got) + } else { + require.NoError(t, err) + assert.Equal(t, got, tt.expectOrders) + } + }) + } +} + +func TestUpdateOutputTypes(t *testing.T) { + tests := []struct { + name string + bpPath string + interfacesFile string + stateFile string 
+ expectedOutputs []*BlueprintOutput + expectError bool + }{ + { + name: "Update output types from state", + bpPath: "sample-module", + interfacesFile: "interfaces_without_output_types_metadata.yaml", + stateFile: "terraform.tfstate", + expectedOutputs: []*BlueprintOutput{ + { + Name: "cluster_id", + Description: "Cluster ID", + Type: structpb.NewStringValue("string"), + }, + { + Name: "endpoint", + Description: "Cluster endpoint", + Type: &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{ + { + Kind: &structpb.Value_StringValue{ + StringValue: "object", + }, + }, + { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "host": { + Kind: &structpb.Value_StringValue{ + StringValue: "string", + }, + }, + "port": { + Kind: &structpb.Value_StringValue{ + StringValue: "number", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Load interfaces from file + bpInterfaces, err := UnmarshalMetadata(metadataTestdataPath, tt.interfacesFile) + require.NoError(t, err) + + // Override with a function that reads a hard-coded tfstate file. 
+ tfState = func(_ string) ([]byte, error) { + if tt.expectError { + return nil, fmt.Errorf("simulated error generating state file") + } + // Copy the test state file to the bpPath + stateFilePath := path.Join(tfTestdataPath, tt.bpPath, tt.stateFile) + stateData, err := os.ReadFile(stateFilePath) + if err != nil { + return nil, fmt.Errorf("error reading state file: %w", err) + } + return stateData, nil + } + + // Update output types + err = updateOutputTypes(path.Join(tfTestdataPath, tt.bpPath), bpInterfaces.Spec.Interfaces) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + // Assert that the output types are updated correctly + expectedOutputsStr := fmt.Sprintf("%v", tt.expectedOutputs) + actualOutputsStr := fmt.Sprintf("%v", bpInterfaces.Spec.Interfaces.Outputs) + assert.Equal(t, expectedOutputsStr, actualOutputsStr) + } + }) + } +} diff --git a/cli/bpmetadata/validate.go b/cli/bpmetadata/validate.go new file mode 100644 index 00000000000..893d9ab8e82 --- /dev/null +++ b/cli/bpmetadata/validate.go @@ -0,0 +1,100 @@ +package bpmetadata + +import ( + _ "embed" + "fmt" + "os" + "path" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/util" + "github.com/xeipuuv/gojsonschema" + "sigs.k8s.io/yaml" +) + +//go:embed schema/gcp-blueprint-metadata.json +var s []byte + +// validateMetadata validates the metadata files for the provided +// blueprint path. 
This validation occurs for top-level blueprint +// metadata and blueprints in the modules/ folder, if present +func validateMetadata(bpPath, wdPath string) error { + // load schema from the binary + schemaLoader := gojsonschema.NewStringLoader(string(s)) + + // check if the provided output path is relative + if !path.IsAbs(bpPath) { + bpPath = path.Join(wdPath, bpPath) + } + + // We don't need to validate metadata under .terraform folders + skipDirsToValidate := []string{".terraform/"} + metadataFiles, err := util.FindFilesWithPattern(bpPath, `^metadata(?:.display)?.yaml$`, skipDirsToValidate) + if err != nil { + Log.Error("unable to read at: %s", bpPath, "err", err) + } + + var vErrs []error + for _, f := range metadataFiles { + err = validateMetadataYaml(f, schemaLoader) + if err != nil { + vErrs = append(vErrs, err) + Log.Error("core metadata validation failed", "err", err) + } + } + + if len(vErrs) > 0 { + return fmt.Errorf("metadata validation failed for at least one blueprint") + } + + return nil +} + +// validateMetadata validates an individual yaml file present at path "m" +func validateMetadataYaml(m string, schema gojsonschema.JSONLoader) error { + // prepare metadata for validation by converting it from YAML to JSON + mBytes, err := convertYamlToJson(m) + if err != nil { + return fmt.Errorf("yaml to json conversion failed for metadata at path %s. error: %w", m, err) + } + + // load metadata from the path + yamlLoader := gojsonschema.NewStringLoader(string(mBytes)) + + // validate metadata against the schema + result, err := gojsonschema.Validate(schema, yamlLoader) + if err != nil { + return fmt.Errorf("metadata validation failed for %s. 
error: %w", m, err) + } + + if !result.Valid() { + for _, e := range result.Errors() { + Log.Error("validation error", "err", e) + } + + return fmt.Errorf("metdata validation failed for: %s", m) + } + + Log.Info("metadata is valid", "path", m) + return nil +} + +// prepares metadata bytes for validation since direct +// validation of YAML is not possible +func convertYamlToJson(m string) ([]byte, error) { + // read metadata for validation + b, err := os.ReadFile(m) + if err != nil { + return nil, fmt.Errorf("unable to read metadata at path %s. error: %w", m, err) + } + + if len(b) == 0 { + return nil, fmt.Errorf("metadata contents can not be empty") + } + + json, err := yaml.YAMLToJSON(b) + if err != nil { + return nil, fmt.Errorf("metadata contents are invalid: %s", err.Error()) + } + + return json, nil +} diff --git a/cli/bpmetadata/validate_test.go b/cli/bpmetadata/validate_test.go new file mode 100644 index 00000000000..a8a142a2daf --- /dev/null +++ b/cli/bpmetadata/validate_test.go @@ -0,0 +1,68 @@ +package bpmetadata + +import ( + "path" + "testing" + + "github.com/xeipuuv/gojsonschema" +) + +const ( + yamlTestDirPath = "../testdata/bpmetadata/schema" +) + +func TestValidateMetadata(t *testing.T) { + tests := []struct { + name string + path string + wantErr bool + }{ + { + name: "empty metadata", + path: "empty-metadata.yaml", + wantErr: true, + }, + { + name: "valid metadata", + path: "valid-metadata.yaml", + wantErr: false, + }, + { + name: "valid metadata with connections", + path: "valid-metadata-connections.yaml", + wantErr: false, + }, + { + name: "valid display metadata with alternate defaults", + path: "valid-display-metadata-alternate-defaults.yaml", + wantErr: false, + }, + { + name: "invalid metadata - title missing", + path: "invalid-metadata.yaml", + wantErr: true, + }, + { + name: "valid enums for QuotaType", + path: "valid-metadata-w-enum.yaml", + }, + { + name: "invalid enums for QuotaResourceType", + path: "invalid-metadata-w-enum.yaml", + 
wantErr: true, + }, + } + + // load schema from the binary + s := gojsonschema.NewReferenceLoader("file://schema/gcp-blueprint-metadata.json") + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateMetadataYaml(path.Join(yamlTestDirPath, tt.path), s) + if (err != nil) != tt.wantErr { + t.Errorf("validateMetadataYaml() error = %v, wantErr %v", err, tt.wantErr) + return + } + }) + } +} diff --git a/cli/bptest/ast.go b/cli/bptest/ast.go new file mode 100644 index 00000000000..7f2ec1cdccf --- /dev/null +++ b/cli/bptest/ast.go @@ -0,0 +1,29 @@ +package bptest + +import ( + "go/ast" + "go/parser" + "go/token" + "strings" +) + +// getTestFuncsFromFile parses a go source file and returns slice of test function names +func getTestFuncsFromFile(filePath string) ([]string, error) { + fileSet := token.NewFileSet() + f, err := parser.ParseFile(fileSet, filePath, nil, parser.AllErrors) + if err != nil { + return nil, err + } + testFuncs := make([]string, 0) + for _, decl := range f.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + // not a function declaration + if !ok { + continue + } + if strings.HasPrefix(funcDecl.Name.Name, "Test") { + testFuncs = append(testFuncs, funcDecl.Name.Name) + } + } + return testFuncs, nil +} diff --git a/cli/bptest/ast_test.go b/cli/bptest/ast_test.go new file mode 100644 index 00000000000..ac67b84bc86 --- /dev/null +++ b/cli/bptest/ast_test.go @@ -0,0 +1,80 @@ +package bptest + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetTestFuncsFromFile(t *testing.T) { + tests := []struct { + name string + data string + want []string + errMsg string + }{ + { + name: "simple", + data: `package test + +import "testing" + +func TestA(t *testing.T) { +} +`, + want: []string{"TestA"}, + }, + { + name: "multiple", + data: `package test + +import "testing" + +const ShouldNotErr = "foo" + +func TestA(t *testing.T) { +} + +func TestB(t *testing.T) { +} + +func OtherHelper(t *testing.T) { +} +`, + 
want: []string{"TestA", "TestB"}, + }, + { + name: "empty", + data: `package test +`, + want: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + filePath, cleanup := writeTmpFile(t, tt.data) + defer cleanup() + got, err := getTestFuncsFromFile(filePath) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + assert.ElementsMatch(tt.want, got) + } + }) + } +} + +func writeTmpFile(t *testing.T, data string) (string, func()) { + assert := assert.New(t) + f, err := os.CreateTemp("", "*.go") + assert.NoError(err) + cleanup := func() { os.Remove(f.Name()) } + _, err = f.Write([]byte(data)) + assert.NoError(err) + f.Close() + return f.Name(), cleanup +} diff --git a/cli/bptest/blueprint_connection_source_version_rule.go b/cli/bptest/blueprint_connection_source_version_rule.go new file mode 100644 index 00000000000..d5c26172043 --- /dev/null +++ b/cli/bptest/blueprint_connection_source_version_rule.go @@ -0,0 +1,46 @@ +package bptest + +import ( + "fmt" + "github.com/hashicorp/go-version" +) + +type BlueprintConnectionSourceVersionRule struct{} + +func (r *BlueprintConnectionSourceVersionRule) name() string { + return "blueprint_connection_source_version_rule" +} + +func (r *BlueprintConnectionSourceVersionRule) enabled() bool { + return true +} + +func (r *BlueprintConnectionSourceVersionRule) check(ctx lintContext) error { + // Check if Spec or Interfaces is nil to avoid null pointer dereference + if ctx.metadata == nil || ctx.metadata.Spec == nil || ctx.metadata.Spec.Interfaces == nil { + fmt.Println("metadata, spec, or interfaces are nil") + return nil + } + + for _, variable := range ctx.metadata.Spec.Interfaces.Variables { + if variable == nil { + continue // Skip if variable is nil + } + + for _, conn := range variable.Connections { + if conn == nil || conn.Source == nil { + continue // Skip if connection or source is nil + } + + if 
conn.Source.Version != "" { + _, err := version.NewConstraint(conn.Source.Version) + if err != nil { + return fmt.Errorf("invalid version: %w", err) + } + return nil + } + } + } + + return nil +} diff --git a/cli/bptest/blueprint_connection_source_version_rule_test.go b/cli/bptest/blueprint_connection_source_version_rule_test.go new file mode 100644 index 00000000000..08010325b8e --- /dev/null +++ b/cli/bptest/blueprint_connection_source_version_rule_test.go @@ -0,0 +1,86 @@ +package bptest + +import ( + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata" + "github.com/stretchr/testify/assert" +) + +func TestBlueprintConnectionSourceVersionRule(t *testing.T) { + tests := []struct { + name string + version string + expectErr bool + errorMessage string + }{ + { + name: "Valid version - no equal sign expect version", + version: "1.2.3", + expectErr: false, + }, + { + name: "Valid version - expect version", + version: "=1.2.3", + expectErr: false, + }, + { + name: "Valid version - pessimistic constraint", + version: "~> 6.0", + expectErr: false, + }, + { + name: "Valid version - minimal version", + version: ">= 0.13.7", + expectErr: false, + }, + { + name: "Valid version - range interval", + version: ">= 0.13.7, < 2.0.0", + expectErr: false, + }, + { + name: "Invalid version - random string", + version: "invalid_version", + expectErr: true, + errorMessage: "invalid_version", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metadata := &bpmetadata.BlueprintMetadata{ + Spec: &bpmetadata.BlueprintMetadataSpec{ + Interfaces: &bpmetadata.BlueprintInterface{ + Variables: []*bpmetadata.BlueprintVariable{ + { + Connections: []*bpmetadata.BlueprintConnection{ + { + Source: &bpmetadata.ConnectionSource{ + Source: "example/source", + Version: tt.version, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := lintContext{ + metadata: metadata, + } + + rule := &BlueprintConnectionSourceVersionRule{} + err := 
rule.check(ctx) + + if tt.expectErr { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMessage) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/cli/bptest/cmd.go b/cli/bptest/cmd.go new file mode 100644 index 00000000000..1f9bc189653 --- /dev/null +++ b/cli/bptest/cmd.go @@ -0,0 +1,159 @@ +package bptest + +import ( + "fmt" + "os" + "path" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/util" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var flags struct { + testDir string + testStage string + setupVars map[string]string +} + +func init() { + viper.AutomaticEnv() + Cmd.AddCommand(listCmd) + Cmd.AddCommand(runCmd) + Cmd.AddCommand(convertCmd) + Cmd.AddCommand(initCmd) + Cmd.AddCommand(lintCmd) + + Cmd.PersistentFlags().StringVar(&flags.testDir, "test-dir", "", "Path to directory containing integration tests (default is computed by scanning current working directory)") + runCmd.Flags().StringVar(&flags.testStage, "stage", "", "Test stage to execute (default is running all stages in order - init, plan, apply, verify, teardown)") + runCmd.Flags().StringToStringVar(&flags.setupVars, "setup-var", map[string]string{}, "Specify outputs from the setup phase (useful with --stage=verify)") +} + +var Cmd = &cobra.Command{ + Use: "test", + Aliases: []string{"bptest"}, + Short: "Blueprint test CLI", + Long: `Blueprint test CLI is used to actuate the Blueprint test framework used for testing KRM and Terraform Blueprints`, + Args: cobra.NoArgs, +} + +var listCmd = &cobra.Command{ + Use: "list", + Short: "list tests", + Long: "Lists both auto discovered and explicit integration tests", + + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + intTestDir := flags.testDir + tests, err := getTests(intTestDir) + if err != nil { + return err + } + // Warn if no tests found + if len(tests) < 1 { + Log.Warn("no tests discovered") + return nil + } + tbl := 
newTable() + tbl.AppendHeader(table.Row{"Name", "Config", "Location"}) + for _, t := range tests { + if t.bptestCfg.Spec.Skip { + Log.Info(fmt.Sprintf("skipping %s due to BlueprintTest config %s", t.name, t.bptestCfg.Name)) + continue + } + tbl.AppendRow(table.Row{t.name, t.config, t.location}) + } + tbl.Render() + return nil + }, +} + +var runCmd = &cobra.Command{ + Use: "run", + Short: "run tests", + Long: "Runs auto discovered and explicit integration tests", + + Args: func(cmd *cobra.Command, args []string) error { + if err := cobra.ExactArgs(1)(cmd, args); err != nil { + return err + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + intTestDir, err := getIntTestDir(flags.testDir) + if err != nil { + return fmt.Errorf("error discovering test dir: %w", err) + } + testStage, err := validateAndGetStage(flags.testStage) + if err != nil { + return err + } + relTestPkg, err := validateAndGetRelativeTestPkg(intTestDir, args[0]) + if err != nil { + return err + } + testCmd, err := getTestCmd(intTestDir, testStage, args[0], relTestPkg, flags.setupVars) + if err != nil { + return err + } + // if err during exec, exit instead of returning an error + // this prevents printing usage as the args were validated above + if err := streamExec(testCmd); err != nil { + Log.Error(err.Error()) + os.Exit(1) + } + return nil + }, +} + +var convertCmd = &cobra.Command{ + Use: "convert", + Short: "convert kitchen tests (experimental)", + Long: "Convert all kitchen tests to blueprint tests (experimental)", + + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return convertKitchenTests() + }, +} + +var initCmd = &cobra.Command{ + Use: "init", + Short: "initialize blueprint test", + Long: "Initialize a new blueprint test", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var initTestName string + // if no args, prompt user to select from examples + if len(args) < 1 { + cwd, err := os.Getwd() + 
if err != nil { + return err + } + examplePaths, err := util.WalkTerraformDirs(path.Join(cwd, "examples")) + if err != nil { + return err + } + exampleNames := make([]string, 0, len(examplePaths)) + for _, examplePath := range examplePaths { + exampleNames = append(exampleNames, path.Base(examplePath)) + } + initTestName = util.PromptSelect("Select example for test", exampleNames) + } else { + initTestName = args[0] + } + return initTest(initTestName) + }, +} + +var lintCmd = &cobra.Command{ + Use: "lint", + Short: "Lints blueprint", + Long: "Lints TF blueprint", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + RunLintCommand() + return nil + }, +} diff --git a/cli/bptest/convert.go b/cli/bptest/convert.go new file mode 100644 index 00000000000..3b126879c5b --- /dev/null +++ b/cli/bptest/convert.go @@ -0,0 +1,260 @@ +package bptest + +import ( + "bytes" + "embed" + "encoding/json" + "fmt" + "html/template" + "os" + "path" + "strings" + + "github.com/iancoleman/strcase" + cb "google.golang.org/api/cloudbuild/v1" + "sigs.k8s.io/yaml" +) + +const ( + intTestPath = "test/integration" + intTestBuildFilePath = "build/int.cloudbuild.yaml" + inspecInputsFile = "inspec.yml" + tmplSuffix = ".tmpl" + goModFilename = "go.mod" + bptTestFilename = "blueprint_test.go" +) + +var ( + //go:embed templates + templateFiles embed.FS + kitchenCFTStageMapping = map[string]string{ + "create": stages[0], + "converge": stages[2], + "verify": stages[3], + "destroy": stages[4], + } +) + +type inspecInputs struct { + Name string `yaml:"name"` + Attributes []struct { + Name string `yaml:"name"` + } `yaml:"attributes"` +} + +// convertKitchenTests converts all kitchen tests to blueprint tests and updates build files +func convertKitchenTests() error { + cwd, err := os.Getwd() + if err != nil { + return err + } + // write go mod + goMod, err := getTmplFileContents(goModFilename) + if err != nil { + return err + } + err = writeFile(path.Join(intTestPath, 
goModFilename), fmt.Sprintf(goMod, path.Base(cwd))) + if err != nil { + return fmt.Errorf("error writing go mod file: %w", err) + } + // write discover test + discoverTest, err := getTmplFileContents(discoverTestFilename) + if err != nil { + return err + } + err = writeFile(path.Join(intTestPath, discoverTestFilename), discoverTest) + if err != nil { + return fmt.Errorf("error writing discover_test.go: %w", err) + } + testDirs, err := getCurrentTestDirs() + if err != nil { + return fmt.Errorf("error getting current test dirs: %w", err) + } + for _, dir := range testDirs { + err = convertTest(path.Join(intTestPath, dir)) + if err != nil { + return fmt.Errorf("error converting %s: %w", dir, err) + } + } + // remove kitchen + err = os.Remove(".kitchen.yml") + if err != nil { + return fmt.Errorf("error removing .kitchen.yml: %w", err) + } + // convert build file + // We use build to identify commands to update and update the commands in the buildFile. + // This minimizes unnecessary diffs in build yaml due to round tripping. 
+ build, buildFile, err := getBuildFromFile(intTestBuildFilePath) + if err != nil { + return fmt.Errorf("error unmarshalling %s: %w", intTestBuildFilePath, err) + } + newBuildFile, err := transformBuild(build, buildFile) + if err != nil { + return fmt.Errorf("error transforming buildfile: %w", err) + } + return writeFile(intTestBuildFilePath, newBuildFile) +} + +// getCurrentTestDirs returns current test dirs in intTestPath +func getCurrentTestDirs() ([]string, error) { + files, err := os.ReadDir(intTestPath) + if err != nil { + return nil, err + } + var dirs []string + for _, f := range files { + if f.IsDir() { + dirs = append(dirs, f.Name()) + } + } + return dirs, nil +} + +// convertTest converts a kitchen test in dir to blueprint test +func convertTest(dir string) error { + // read inspec.yaml + f, err := os.ReadFile(path.Join(dir, inspecInputsFile)) + if err != nil { + return fmt.Errorf("error reading inspec file: %w", err) + } + var inspec inspecInputs + err = yaml.Unmarshal(f, &inspec) + if err != nil { + return fmt.Errorf("error unmarshalling inspec file: %w", err) + } + // get inspec input attributes + var inputs []string + for _, i := range inspec.Attributes { + inputs = append(inputs, i.Name) + } + // get bpt skeleton + testName := path.Base(dir) + bpTest, err := getBPTestFromTmpl(testName, inputs) + if err != nil { + return fmt.Errorf("error creating blueprint test: %w", err) + } + // remove old test + err = os.RemoveAll(dir) + if err != nil { + return fmt.Errorf("error removing old test dir: %w", err) + } + // write bpt + err = os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("error creating test dir: %w", err) + } + return writeFile(path.Join(dir, fmt.Sprintf("%s_test.go", strcase.ToSnake(testName))), bpTest) +} + +// getTmplFileContents returns contents of embedded file f +func getTmplFileContents(f string) (string, error) { + tmplF := path.Join("templates", fmt.Sprintf("%s%s", f, tmplSuffix)) + contents, err := 
templateFiles.ReadFile(tmplF) + if err != nil { + return "", fmt.Errorf("error reading %s : %w", tmplF, err) + } + return string(contents), nil +} + +// getTestFnName returns the go test function name +func getTestFnName(name string) string { + return fmt.Sprintf("Test%s", strcase.ToCamel(name)) +} + +// getBPTestFromTmpl returns a skeleton blueprint test +func getBPTestFromTmpl(testName string, inputs []string) (string, error) { + pkgName := strcase.ToSnake(testName) + fnName := getTestFnName(testName) + tmpl, err := getTmplFileContents(bptTestFilename) + if err != nil { + return "", err + } + t, err := template.New("test").Funcs(template.FuncMap{"toLowerCamel": strcase.ToLowerCamel}).Parse(tmpl) + if err != nil { + return "", err + } + var tpl bytes.Buffer + err = t.Execute(&tpl, struct { + PkgName string + FnName string + Inputs []string + }{ + PkgName: pkgName, + FnName: fnName, + Inputs: inputs, + }, + ) + if err != nil { + return "", err + } + return tpl.String(), nil +} + +// writeFile writes content to file path +func writeFile(p string, content string) error { + return os.WriteFile(p, []byte(content), os.ModePerm) +} + +// transformBuild transforms cloudbuild file contents with kitchen commands to CFT cli commands +func transformBuild(b *cb.Build, f string) (string, error) { + for _, step := range b.Steps { + // test commands have at least two args + if len(step.Args) < 2 { + continue + } + cmd := step.Args[len(step.Args)-1] + // skip if not a kitchen command + kitchenCmdIndex := strings.Index(cmd, "kitchen_do") + if kitchenCmdIndex == -1 { + continue + } + kitchenCmd := cmd[kitchenCmdIndex:] + newCmd, err := getCFTCmd(kitchenCmd) + if err != nil { + return "", err + } + f = strings.ReplaceAll(f, cmd, newCmd) + } + return f, nil +} + +// getCFTCmd returns an equivalent CFT command for a kitchen command +func getCFTCmd(kitchenCmd string) (string, error) { + if !strings.Contains(kitchenCmd, "kitchen_do") { + return "", fmt.Errorf("invalid kitchen command: 
%s", kitchenCmd) + } + cmdArr := strings.Split(kitchenCmd, " ") + cftCmd := []string{"cft", "test", "run"} + // cmd of form kitchen_do verb + if len(cmdArr) == 2 { + kitchenStage := cmdArr[len(cmdArr)-1] + cftCmd = append(cftCmd, []string{"all", "--stage", kitchenCFTStageMapping[kitchenStage]}...) + } else if len(cmdArr) == 3 { + // cmd of form kitchen_do verb test-name + kitchenTestName := cmdArr[len(cmdArr)-1] + kitchenStage := cmdArr[len(cmdArr)-2] + cftTestName := getTestFnName(strings.TrimSuffix(kitchenTestName, "-local")) + cftCmd = append(cftCmd, []string{cftTestName, "--stage", kitchenCFTStageMapping[kitchenStage]}...) + } else { + return "", fmt.Errorf("unknown kitchen command: %s", kitchenCmd) + } + cftCmd = append(cftCmd, "--verbose") + return strings.Join(cftCmd, " "), nil +} + +// getBuildFromFile unmarshalls a cloudbuild file +func getBuildFromFile(fp string) (*cb.Build, string, error) { + f, err := os.ReadFile(fp) + if err != nil { + return nil, "", err + } + j, err := yaml.YAMLToJSON(f) + if err != nil { + return nil, "", err + } + var b cb.Build + if err = json.Unmarshal(j, &b); err != nil { + fmt.Println(err.Error()) + } + return &b, string(f), nil +} diff --git a/cli/bptest/convert_test.go b/cli/bptest/convert_test.go new file mode 100644 index 00000000000..b47c735647e --- /dev/null +++ b/cli/bptest/convert_test.go @@ -0,0 +1,161 @@ +package bptest + +import ( + "os" + "path" + "testing" + + "github.com/otiai10/copy" + "github.com/stretchr/testify/assert" +) + +const ( + cbTestDataDir = "testdata/cb" + kitchenTestData = "testdata/kitchen-tests" +) + +func TestGetCFTCmd(t *testing.T) { + tests := []struct { + name string + kitchenCmd string + want string + errMsg string + }{ + { + name: "simple", + kitchenCmd: "kitchen_do create", + want: "cft test run all --stage init --verbose", + }, + { + name: "explicit test", + kitchenCmd: "kitchen_do converge foo", + want: "cft test run TestFoo --stage apply --verbose", + }, + { + name: "not kitchen", + 
kitchenCmd: "foo verify bar", + errMsg: "invalid kitchen command: foo verify bar", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + got, err := getCFTCmd(tt.kitchenCmd) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + assert.Equal(tt.want, got) + } + }) + } +} + +func TestTransformBuild(t *testing.T) { + tests := []struct { + name string + fp string + wantFp string + errMsg string + }{ + { + name: "simple", + fp: path.Join(cbTestDataDir, "oldAll.yaml"), + wantFp: path.Join(cbTestDataDir, "newAll.yaml"), + }, + { + name: "targeted", + fp: path.Join(cbTestDataDir, "oldTarget.yaml"), + wantFp: path.Join(cbTestDataDir, "newTarget.yaml"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + b, bf, err := getBuildFromFile(tt.fp) + assert.NoError(err) + gotBf, err := transformBuild(b, bf) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + _, wbf, err := getBuildFromFile(tt.wantFp) + assert.NoError(err) + assert.Equal(wbf, gotBf) + } + }) + } +} + +func TestConvertTest(t *testing.T) { + tests := []struct { + name string + dir string + expectedFilesContents map[string]string + errMsg string + }{ + { + name: "simple", + dir: "simple-example", + expectedFilesContents: map[string]string{"simple_example_test.go": `package simple_example + +import ( + "fmt" + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/stretchr/testify/assert" +) + +func TestSimpleExample(t *testing.T) { + bpt := tft.NewTFBlueprintTest(t) + + bpt.DefineVerify(func(assert *assert.Assertions) { + bpt.DefaultVerify(assert) + + projectId := bpt.GetStringOutput("project_id") + location := 
bpt.GetStringOutput("location") + clusterName := bpt.GetStringOutput("cluster_name") + masterKubernetesVersion := bpt.GetStringOutput("master_kubernetes_version") + kubernetesEndpoint := bpt.GetStringOutput("kubernetes_endpoint") + clientToken := bpt.GetStringOutput("client_token") + serviceAccount := bpt.GetStringOutput("service_account") + serviceAccount := bpt.GetStringOutput("service_account") + databaseEncryptionKeyName := bpt.GetStringOutput("database_encryption_key_name") + identityNamespace := bpt.GetStringOutput("identity_namespace") + + op := gcloud.Run(t,"") + assert.Contains(op.Get("result").String(), "foo", "contains foo") + }) + + bpt.Test() +} +`}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + tmpDir := path.Join(t.TempDir(), tt.dir) + err := copy.Copy(path.Join(kitchenTestData, tt.dir), tmpDir) + assert.NoError(err) + err = convertTest(tmpDir) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + for name, expectedContent := range tt.expectedFilesContents { + pth := path.Join(tmpDir, name) + assert.FileExists(pth) + gotContents, err := os.ReadFile(pth) + assert.NoError(err) + assert.Equal(expectedContent, string(gotContents)) + } + } + }) + } +} diff --git a/cli/bptest/init.go b/cli/bptest/init.go new file mode 100644 index 00000000000..f522db4a949 --- /dev/null +++ b/cli/bptest/init.go @@ -0,0 +1,73 @@ +package bptest + +import ( + "fmt" + "os" + "path" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/util" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/discovery" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/iancoleman/strcase" +) + +func initTest(name string) error { + // check if test already exist + testDir := path.Join(intTestPath, name) + exists, err := util.Exists(testDir) + if err != nil { + return err + } + if exists { + return 
fmt.Errorf("%s already exists", testDir) + } + + // write go mod if not exists + goModpath := path.Join(intTestPath, goModFilename) + exists, err = util.Exists(goModpath) + if err != nil { + return err + } + if !exists { + goMod, err := getTmplFileContents(goModFilename) + if err != nil { + return err + } + cwd, err := os.Getwd() + if err != nil { + return err + } + err = writeFile(goModpath, fmt.Sprintf(goMod, path.Base(cwd))) + if err != nil { + return fmt.Errorf("error writing go mod file: %w", err) + } + } + + // discover test configs + testCfg, err := discovery.GetConfigDirFromTestDir(testDir) + if err != nil { + return fmt.Errorf("unable to discover test configs for %s: %w", testDir, err) + } + + // Parse config to expose outputs within test + mod, diags := tfconfig.LoadModule(testCfg) + if diags.HasErrors() { + return fmt.Errorf("error parsing outputs: %w", diags) + } + outputs := make([]string, 0, len(mod.Outputs)) + for _, op := range mod.Outputs { + // todo(bharathkkb): make templates type aware + outputs = append(outputs, op.Name) + } + + // render and write test + testFile, err := getBPTestFromTmpl(name, outputs) + if err != nil { + return fmt.Errorf("error creating blueprint test: %w", err) + } + err = os.MkdirAll(testDir, os.ModePerm) + if err != nil { + return fmt.Errorf("error creating test dir: %w", err) + } + return writeFile(path.Join(testDir, fmt.Sprintf("%s_test.go", strcase.ToSnake(name))), testFile) +} diff --git a/cli/bptest/init_test.go b/cli/bptest/init_test.go new file mode 100644 index 00000000000..b35a6a7ad94 --- /dev/null +++ b/cli/bptest/init_test.go @@ -0,0 +1,158 @@ +package bptest + +import ( + "os" + "path" + "testing" + + "github.com/otiai10/copy" + "github.com/stretchr/testify/assert" +) + +const ( + initTestDir = "testdata/init" +) + +func TestInitTest(t *testing.T) { + tests := []struct { + name string + bptName string + preProcessTestDir func(t *testing.T, dir string) + expectedFilesContents map[string]string + errMsg string 
+ }{ + { + name: "simple with mod", + bptName: "foo", + expectedFilesContents: map[string]string{ + "test/integration/foo/foo_test.go": `package foo + +import ( + "fmt" + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/stretchr/testify/assert" +) + +func TestFoo(t *testing.T) { + bpt := tft.NewTFBlueprintTest(t) + + bpt.DefineVerify(func(assert *assert.Assertions) { + bpt.DefaultVerify(assert) + + foo := bpt.GetStringOutput("foo") + + op := gcloud.Run(t,"") + assert.Contains(op.Get("result").String(), "foo", "contains foo") + }) + + bpt.Test() +} +`, + "test/integration/go.mod": "", // we create an empty go.mod in preprocess so no generation is expected + }, + preProcessTestDir: func(t *testing.T, dir string) { + _, err := os.Create(path.Join(dir, intTestPath, "go.mod")) + if err != nil { + t.Fatalf("error creating go.mod: %v", err) + } + }, + }, + { + name: "simple without mod", + bptName: "foo", + expectedFilesContents: map[string]string{ + "test/integration/foo/foo_test.go": `package foo + +import ( + "fmt" + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/stretchr/testify/assert" +) + +func TestFoo(t *testing.T) { + bpt := tft.NewTFBlueprintTest(t) + + bpt.DefineVerify(func(assert *assert.Assertions) { + bpt.DefaultVerify(assert) + + foo := bpt.GetStringOutput("foo") + + op := gcloud.Run(t,"") + assert.Contains(op.Get("result").String(), "foo", "contains foo") + }) + + bpt.Test() +} +`, + "test/integration/go.mod": `module github.com/terraform-google-modules/init/test/integration + +go 1.16 + +require ( + github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test v0.4.0 + github.com/stretchr/testify v1.8.1 +) +`, + }, + }, + { + 
name: "invalid already exists", + bptName: "bar", + errMsg: "test/integration/bar already exists", + }, + { + name: "invalid no example", + bptName: "baz", + errMsg: "unable to discover test configs for test/integration/baz: unable to find config in test/fixtures/baz nor examples/baz", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + // copy fixture to tmpdir + tmpDir := path.Join(t.TempDir(), initTestDir) + err := copy.Copy(initTestDir, tmpDir) + assert.NoError(err) + // apply any pre processing before tests + if tt.preProcessTestDir != nil { + tt.preProcessTestDir(t, tmpDir) + } + // switch to tmp dir for test + t.Cleanup(switchDir(t, tmpDir)) + + err = initTest(tt.bptName) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + for name, expectedContent := range tt.expectedFilesContents { + pth := path.Join(tmpDir, name) + assert.FileExists(pth) + gotContents, err := os.ReadFile(pth) + assert.NoError(err) + assert.Equal(expectedContent, string(gotContents)) + } + } + }) + } +} + +func switchDir(t *testing.T, dir string) func() { + assert := assert.New(t) + currDir, err := os.Getwd() + assert.NoError(err) + err = os.Chdir(dir) + assert.NoError(err) + return func() { + if err := os.Chdir(currDir); err != nil { + assert.NoError(err) + } + } +} diff --git a/cli/bptest/lint.go b/cli/bptest/lint.go new file mode 100644 index 00000000000..26f6141fa23 --- /dev/null +++ b/cli/bptest/lint.go @@ -0,0 +1,47 @@ +package bptest + +import ( + "fmt" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata" + "os" +) + +const metadataFile = "metadata.yaml" + +// RunLintCommand is the entry function that will run the metadata.yml lint checks. 
+func RunLintCommand() { + dir, err := os.Getwd() + if err != nil { + fmt.Printf("Error getting current directory: %v\n", err) + os.Exit(1) + } + + // Parse metadata.yaml to proto + metadata, err := bpmetadata.UnmarshalMetadata(dir, "/"+metadataFile) + metadataFile := dir + "/" + metadataFile + + if err != nil { + fmt.Printf("Error parsing metadata file: %v\n", err) + os.Exit(1) + } + + ctx := lintContext{ + metadata: metadata, + filePath: metadataFile, + } + + runner := &lintRunner{} + runner.RegisterRule(&BlueprintConnectionSourceVersionRule{}) + + // Run lint checks + errs := runner.Run(ctx) + if len(errs) > 0 { + fmt.Println("Linting failed with the following errors:") + for _, err := range errs { + fmt.Println("- ", err) + } + os.Exit(1) + } else { + fmt.Println("All lint checks passed!") + } +} diff --git a/cli/bptest/lint_interface.go b/cli/bptest/lint_interface.go new file mode 100644 index 00000000000..441a59cbaa8 --- /dev/null +++ b/cli/bptest/lint_interface.go @@ -0,0 +1,49 @@ +package bptest + +import ( + "fmt" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata" + "os" +) + +// lintRule defines the common interface for all metadata lint rules. +type lintRule interface { + name() string // Unique name of the rule + enabled() bool // Indicates if the rule is enabled by default + check(lintContext) error // Main entrypoint for rule validation +} + +// lintContext holds the metadata and other contextual information for a rule. +type lintContext struct { + metadata *bpmetadata.BlueprintMetadata // Parsed metadata for the blueprint + filePath string // Path of the metadata file being checked +} + +// lintRunner is responsible for running all registered lint rules. +type lintRunner struct { + rules []lintRule +} + +// RegisterRule adds a new rule to the runner. +func (r *lintRunner) RegisterRule(rule lintRule) { + r.rules = append(r.rules, rule) +} + +// Run runs all the registered rules on the provided context. 
+func (r *lintRunner) Run(ctx lintContext) []error { + var errs []error + if os.Getenv("BLUEPRINT_LINT_DISABLE") == "1" { + fmt.Println("BLUEPRINT_LINT_DISABLE is set to 1. Skipping lint checks.") + return errs + } + + for _, rule := range r.rules { + if rule.enabled() { + err := rule.check(ctx) + if err != nil { + errs = append(errs, err) + } + } + } + return errs +} diff --git a/cli/bptest/lint_interface_test.go b/cli/bptest/lint_interface_test.go new file mode 100644 index 00000000000..a6c5284682c --- /dev/null +++ b/cli/bptest/lint_interface_test.go @@ -0,0 +1,82 @@ +package bptest + +import ( + "errors" + "os" + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata" + "github.com/stretchr/testify/assert" +) + +const BlueprintLintDisableEnv = "BLUEPRINT_LINT_DISABLE" + +type mockLintRule struct { + Name string + Enabled bool + Err error +} + +func (m *mockLintRule) name() string { + return m.Name +} + +func (m *mockLintRule) enabled() bool { + return m.Enabled +} + +func (m *mockLintRule) check(ctx lintContext) error { + return m.Err +} + +func TestLintRunner(t *testing.T) { + t.Run("register and run rules with lintRunner", func(t *testing.T) { + mockRule1 := &mockLintRule{Name: "MockRule1", Enabled: true, Err: nil} + mockRule2 := &mockLintRule{Name: "MockRule2", Enabled: true, Err: errors.New("lint error")} + mockRule3 := &mockLintRule{Name: "MockRule3", Enabled: false, Err: nil} + + runner := lintRunner{} + runner.RegisterRule(mockRule1) + runner.RegisterRule(mockRule2) + runner.RegisterRule(mockRule3) + + ctx := lintContext{ + metadata: &bpmetadata.BlueprintMetadata{ApiVersion: "v1", Kind: "Blueprint"}, + filePath: "/path/to/metadata/file.yaml", + } + + errs := runner.Run(ctx) + assert.Len(t, errs, 1, "Only one rule should return an error") + assert.Equal(t, "lint error", errs[0].Error(), "Error message should match the expected lint error") + }) + + t.Run("run without registered rules", func(t *testing.T) { + runner := 
lintRunner{} + ctx := lintContext{ + metadata: &bpmetadata.BlueprintMetadata{ApiVersion: "v1", Kind: "Blueprint"}, + filePath: "/path/to/metadata/file.yaml", + } + + errs := runner.Run(ctx) + assert.Empty(t, errs, "No errors should be returned when no rules are registered") + }) + t.Run("skip lint rules when BLUEPRINT_LINT_DISABLE is set", func(t *testing.T) { + os.Setenv(BlueprintLintDisableEnv, "1") + defer os.Unsetenv(BlueprintLintDisableEnv) + + mockRule1 := &mockLintRule{Name: "MockRule1", Enabled: true, Err: errors.New("lint error")} + mockRule2 := &mockLintRule{Name: "MockRule2", Enabled: true, Err: errors.New("another lint error")} + + runner := lintRunner{} + runner.RegisterRule(mockRule1) + runner.RegisterRule(mockRule2) + + ctx := lintContext{ + metadata: &bpmetadata.BlueprintMetadata{ApiVersion: "v1", Kind: "Blueprint"}, + filePath: "/path/to/metadata/file.yaml", + } + + errs := runner.Run(ctx) + assert.Empty(t, errs, "No errors should be returned when BLUEPRINT_LINT_DISABLE is set") + }) +} diff --git a/cli/bptest/list.go b/cli/bptest/list.go new file mode 100644 index 00000000000..11c6d90773e --- /dev/null +++ b/cli/bptest/list.go @@ -0,0 +1,203 @@ +package bptest + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path" + "path/filepath" + "sort" + "strings" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/discovery" + testing "github.com/mitchellh/go-testing-interface" +) + +const ( + discoverTestFilename = "discover_test.go" +) + +type bpTest struct { + name string + config string + location string + bptestCfg discovery.BlueprintTestConfig +} + +// getTests returns slice of all blueprint tests +func getTests(intTestDir string) ([]bpTest, error) { + intTestDir, err := getIntTestDir(intTestDir) + if err != nil { + return nil, err + } + Log.Info(fmt.Sprintf("using test-dir: %s", intTestDir)) + + tests := []bpTest{} + discoveredTests, err := getDiscoveredTests(intTestDir) + if err != nil { + return nil, err + } + 
tests = append(tests, discoveredTests...) + + explicitTests, err := getExplicitTests(intTestDir) + if err != nil { + return nil, err + } + tests = append(tests, explicitTests...) + + return tests, nil +} + +// getDiscoveredTests returns slice of discovered blueprint tests +func getDiscoveredTests(intTestDir string) ([]bpTest, error) { + discoverTestFile := path.Join(intTestDir, discoverTestFilename) + // skip discovering tests if no discoverTestFile + _, err := os.Stat(discoverTestFile) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return nil, err + } + Log.Warn(fmt.Sprintf("Skipping discovered test. %s not found.", discoverTestFilename)) + return nil, nil + } + + // if discoverTestFile is present, find auto discovered tests + tests := []bpTest{} + if discoverTestFile != "" { + discoverTestName, err := getDiscoverTestName(discoverTestFile) + if err != nil { + return nil, err + } + discoveredSubTests := discovery.FindTestConfigs(&testing.RuntimeT{}, intTestDir) + for testName, testCfg := range discoveredSubTests { + bptestCfg, err := discovery.GetTestConfig(path.Join(testCfg, discovery.DefaultTestConfigFilename)) + if err != nil { + Log.Warn(fmt.Sprintf("error discovering BlueprintTest config: %v", err)) + } + tests = append(tests, bpTest{name: fmt.Sprintf("%s/%s", discoverTestName, testName), config: testCfg, location: discoverTestFile, bptestCfg: bptestCfg}) + } + } + sort.SliceStable(tests, func(i, j int) bool { return tests[i].name < tests[j].name }) + return tests, nil +} + +func getExplicitTests(intTestDir string) ([]bpTest, error) { + // find all explicit test files ending with *_test.go excluding discover_test.go within intTestDir + testFiles, err := findFiles(intTestDir, + func(d fs.DirEntry) bool { + return strings.HasSuffix(d.Name(), "_test.go") && d.Name() != discoverTestFilename + }, + ) + + if err != nil { + Log.Warn(fmt.Sprintf("walking file path: %s : details: %v", intTestDir, err)) + } + + eTests := []bpTest{} + for _, testFile := range 
testFiles { + // testDir name maps to a matching example/fixture + testDir := path.Dir(testFile) + testCfg, err := discovery.GetConfigDirFromTestDir(testDir) + if err != nil { + Log.Warn(fmt.Sprintf("unable to discover configs for %s: %v", testDir, err)) + } + + // discover BlueprintTest config if any + bptestCfg, err := discovery.GetTestConfig(path.Join(testCfg, discovery.DefaultTestConfigFilename)) + if err != nil { + Log.Warn(fmt.Sprintf("error discovering BlueprintTest config: %v", err)) + } + + testFns, err := getTestFuncsFromFile(testFile) + if err != nil { + return nil, err + } + for _, fnName := range testFns { + eTests = append(eTests, bpTest{name: fnName, location: testFile, config: testCfg, bptestCfg: bptestCfg}) + } + } + sort.SliceStable(eTests, func(i, j int) bool { return eTests[i].name < eTests[j].name }) + return eTests, nil +} + +// getDiscoverTestName returns test name used for auto discovered tests +func getDiscoverTestName(dFileName string) (string, error) { + fn, err := getTestFuncsFromFile(dFileName) + if err != nil { + return "", err + } + // enforce only one main test func decl for discovered tests + if len(fn) != 1 { + return "", fmt.Errorf("only one function should be defined in %s. Found %+q", dFileName, fn) + } + return fn[0], nil +} + +// discoverIntTestDir attempts to discover the integration test directory +// by searching for discover_test.go in the current working directory. +// If not found, it returns current working directory. +func discoverIntTestDir(cwd string) (string, error) { + // search for discover_test.go + discoverTestFiles, err := findFiles(cwd, + func(d fs.DirEntry) bool { + return d.Name() == discoverTestFilename + }, + ) + + if err != nil { + Log.Warn(fmt.Sprintf("walking file path: %s : details: %v", cwd, err)) + } + + if len(discoverTestFiles) > 1 { + return "", fmt.Errorf("found multiple %s files: %+q. 
Exactly one file was expected", discoverTestFilename, discoverTestFiles) + } + if len(discoverTestFiles) == 1 { + relIntTestDir, err := filepath.Rel(cwd, path.Dir(discoverTestFiles[0])) + if err != nil { + return "", err + } + return relIntTestDir, nil + } + // no discover_test.go file discovered + return ".", nil +} + +// getIntTestDir discovers the integration test directory +// from current working directory if an empty intTestDir is provided +func getIntTestDir(intTestDir string) (string, error) { + if intTestDir != "" { + return intTestDir, nil + } + // discover from current working directory + cwd, err := os.Getwd() + if err != nil { + return "", err + } + return discoverIntTestDir(cwd) +} + +// findFiles returns a slice of file paths matching matchFn +func findFiles(dir string, matchFn func(d fs.DirEntry) bool) ([]string, error) { + files := []string{} + err := filepath.WalkDir(dir, func(fpath string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + // ignore hidden dirs + if d.IsDir() && strings.HasPrefix(d.Name(), ".") { + return filepath.SkipDir + } + if !d.IsDir() && matchFn(d) { + files = append(files, fpath) + return nil + } + return nil + }) + + if err != nil { + return nil, err + } + + return files, nil +} diff --git a/cli/bptest/list_test.go b/cli/bptest/list_test.go new file mode 100644 index 00000000000..f12c8602bf5 --- /dev/null +++ b/cli/bptest/list_test.go @@ -0,0 +1,264 @@ +package bptest + +import ( + "os" + "path" + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/discovery" + "github.com/stretchr/testify/assert" +) + +const ( + testDirWithDiscovery = "testdata/with-discovery" + intTestDir = "test/integration" +) + +func TestGetDiscoveredTests(t *testing.T) { + tests := []struct { + name string + testDir string + want []bpTest + errMsg string + }{ + { + name: "simple", + testDir: path.Join(testDirWithDiscovery, intTestDir), + want: []bpTest{ + 
getBPTest("TestAll/examples/baz", path.Join(testDirWithDiscovery, "examples/baz"), path.Join(testDirWithDiscovery, intTestDir, discoverTestFilename), false), + getBPTest("TestAll/fixtures/qux", path.Join(testDirWithDiscovery, "test/fixtures/qux"), path.Join(testDirWithDiscovery, intTestDir, discoverTestFilename), false), + getBPTest("TestAll/examples/quux", path.Join(testDirWithDiscovery, "examples/quux"), path.Join(testDirWithDiscovery, intTestDir, discoverTestFilename), true), + }, + }, + { + name: "no discovery", + testDir: path.Join(testDirWithDiscovery, "doesnotexist"), + want: []bpTest{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + got, err := getDiscoveredTests(tt.testDir) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + assert.ElementsMatch(tt.want, got) + } + }) + } +} + +func TestGetExplicitTests(t *testing.T) { + tests := []struct { + name string + testDir string + want []bpTest + errMsg string + }{ + { + name: "simple", + testDir: path.Join(testDirWithDiscovery, intTestDir), + want: []bpTest{ + getBPTest("TestBar", path.Join(testDirWithDiscovery, "examples/bar"), path.Join(testDirWithDiscovery, intTestDir, "bar/bar_test.go"), false), + getBPTest("TestFoo", path.Join(testDirWithDiscovery, "test/fixtures/foo"), path.Join(testDirWithDiscovery, intTestDir, "foo/foo_test.go"), false), + getBPTest("TestQuuz", path.Join(testDirWithDiscovery, "test/fixtures/quuz"), path.Join(testDirWithDiscovery, intTestDir, "quuz/quuz_test.go"), true), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + got, err := getExplicitTests(tt.testDir) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + assert.ElementsMatch(tt.want, got) + } + }) + } +} + +func TestGetTests(t *testing.T) { + tests := []struct { + name string + 
testDir string + want []bpTest + errMsg string + }{ + { + name: "simple", + testDir: path.Join(testDirWithDiscovery, intTestDir), + want: []bpTest{ + getBPTest("TestAll/examples/baz", path.Join(testDirWithDiscovery, "examples/baz"), path.Join(testDirWithDiscovery, intTestDir, discoverTestFilename), false), + getBPTest("TestAll/fixtures/qux", path.Join(testDirWithDiscovery, "test/fixtures/qux"), path.Join(testDirWithDiscovery, intTestDir, discoverTestFilename), false), + getBPTest("TestAll/examples/quux", path.Join(testDirWithDiscovery, "examples/quux"), path.Join(testDirWithDiscovery, intTestDir, discoverTestFilename), true), + getBPTest("TestBar", path.Join(testDirWithDiscovery, "examples/bar"), path.Join(testDirWithDiscovery, intTestDir, "bar/bar_test.go"), false), + getBPTest("TestFoo", path.Join(testDirWithDiscovery, "test/fixtures/foo"), path.Join(testDirWithDiscovery, intTestDir, "foo/foo_test.go"), false), + getBPTest("TestQuuz", path.Join(testDirWithDiscovery, "test/fixtures/quuz"), path.Join(testDirWithDiscovery, intTestDir, "quuz/quuz_test.go"), true), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + got, err := getTests(tt.testDir) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + assert.ElementsMatch(tt.want, got) + } + }) + } +} + +func getBPTest(n string, c string, l string, s bool) bpTest { + b := discovery.BlueprintTestConfig{} + b.Spec.Skip = s + if s { + b.APIVersion = "blueprints.cloud.google.com/v1alpha1" + b.Kind = "BlueprintTest" + b.Name = path.Base(c) + b.Path = path.Join(c, discovery.DefaultTestConfigFilename) + } + return bpTest{name: n, config: c, location: l, bptestCfg: b} +} + +func TestGetDiscoverTestName(t *testing.T) { + tests := []struct { + name string + data string + want string + errMsg string + }{ + { + name: "simple", + data: `package test + +import "testing" + +func TestAll(t *testing.T) { +} +`, + 
want: "TestAll", + }, + { + name: "multiple", + data: `package test + +import "testing" + +const ShouldNotErr = "foo" + +func TestA(t *testing.T) { +} + +func TestB(t *testing.T) { +} + +func OtherHelper(t *testing.T) { +} +`, + errMsg: "only one function should be defined", + }, + { + name: "empty", + data: `package test +`, + errMsg: "only one function should be defined", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + filePath, cleanup := writeTmpFile(t, tt.data) + defer cleanup() + got, err := getDiscoverTestName(filePath) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + assert.Equal(tt.want, got) + } + }) + } +} + +func Test_discoverIntTestDir(t *testing.T) { + tests := []struct { + name string + files []string + want string + errMsg string + }{ + { + name: "with single discover_test.go", + files: []string{discoverTestFilename}, + want: ".", + }, + { + name: "with single discover_test.go in a dir", + files: []string{path.Join("test/integration", discoverTestFilename)}, + want: "test/integration", + }, + { + name: "with single discover_test.go in a dir and other files", + files: []string{path.Join("foo/bar/baz", discoverTestFilename), "foo.go", "test.tf", "other/test/bar_test.go"}, + want: "foo/bar/baz", + }, + { + name: "with single discover_test.go and multiple hidden discover_test.go", + files: []string{path.Join("foo/bar/baz", discoverTestFilename), path.Join("foo/bar/baz/.terraform", discoverTestFilename), "foo.go", "test.tf", "other/test/bar_test.go"}, + want: "foo/bar/baz", + }, + { + name: "with multiple discover_test.go", + files: []string{path.Join("mod1/test/integration", discoverTestFilename), path.Join("mod2/test/integration", discoverTestFilename)}, + errMsg: "found multiple discover_test.go files:", + }, + { + name: "no discover_test.go files", + files: []string{}, + want: ".", + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + dir, cleanup := createFilesInTmpDir(t, tt.files) + defer cleanup() + got, err := discoverIntTestDir(dir) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + assert.Equal(tt.want, got) + } + }) + } +} + +func createFilesInTmpDir(t *testing.T, files []string) (string, func()) { + assert := assert.New(t) + tempDir, err := os.MkdirTemp("", "bpt-") + assert.NoError(err) + cleanup := func() { os.RemoveAll(tempDir) } + + // create files in tmpdir + for _, f := range files { + p := path.Join(tempDir, path.Dir(f)) + err = os.MkdirAll(p, 0755) + assert.NoError(err) + _, err = os.Create(path.Join(p, path.Base(f))) + assert.NoError(err) + } + return tempDir, cleanup +} diff --git a/cli/bptest/main.go b/cli/bptest/main.go new file mode 100644 index 00000000000..711949b5f21 --- /dev/null +++ b/cli/bptest/main.go @@ -0,0 +1,8 @@ +package bptest + +import ( + log "github.com/inconshreveable/log15" +) + +// bptest log15 handler +var Log = log.New() diff --git a/cli/bptest/run.go b/cli/bptest/run.go new file mode 100644 index 00000000000..6c64eb2af24 --- /dev/null +++ b/cli/bptest/run.go @@ -0,0 +1,139 @@ +package bptest + +import ( + "bufio" + "fmt" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "sync" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" + "github.com/spf13/viper" +) + +const ( + allTests = "all" + testStageEnvVarKey = "RUN_STAGE" + gotestBin = "gotest" + goBin = "go" + + // The tfplan.json files that are being used as input for the terraform validation tests + // through the gcloud beta terraform vet are higher than the buffer default value (64*1024), + // after some tests we found evidence that the values were around 3MB to 5MB, so + // we chose a value that is at least 2x higher than the original one to avoid errors. 
+ // maxScanTokenSize is the maximum size used to buffer a token + // startBufSize is the initial size of the scanner buffer + maxScanTokenSize = 10 * 1024 * 1024 + startBufSize = 4096 + // This must be kept in sync with what github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft parses. + setupEnvVarPrefix = "CFT_SETUP_" +) + +var allTestArgs = []string{"-p", "1", "-count", "1", "-timeout", "0"} + +// validateAndGetRelativeTestPkg validates a given test or test regex is part of the blueprint test set and returns location of test relative to intTestDir +func validateAndGetRelativeTestPkg(intTestDir string, name string) (string, error) { + // user wants to run all tests + if name == allTests { + return "./...", nil + } + + tests, err := getTests(intTestDir) + if err != nil { + return "", err + } + testNames := []string{} + for _, test := range tests { + if test.bptestCfg.Spec.Skip { + Log.Info(fmt.Sprintf("skipping %s due to BlueprintTest config %s", test.name, test.bptestCfg.Name)) + continue + } + matched, _ := regexp.Match(name, []byte(test.name)) + if test.name == name { + //exact match, return test relative test pkg + relPkg, err := filepath.Rel(intTestDir, path.Dir(test.location)) + if err != nil { + return "", err + } + return fmt.Sprintf("./%s", relPkg), nil + } else if matched { + // loose match, more than one test could be specified + return "./...", nil + } + testNames = append(testNames, test.name) + } + return "", fmt.Errorf("unable to find %s- one of %+q expected", name, append(testNames, allTests)) +} + +// streamExec runs a given cmd while streaming logs +func streamExec(cmd *exec.Cmd) error { + op, err := cmd.StdoutPipe() + if err != nil { + return err + } + cmd.Stderr = cmd.Stdout + Log.Debug(fmt.Sprintf("running %s with args %v in %s", cmd.Path, cmd.Args, cmd.Dir)) + + // waitgroup to block while processing exec op + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() + go func() { + defer wg.Done() + scanner := 
bufio.NewScanner(op) + scanner.Buffer(make([]byte, startBufSize), maxScanTokenSize) + for scanner.Scan() { + fmt.Println(scanner.Text()) + } + if err := scanner.Err(); err != nil { + Log.Error(fmt.Sprintf("error reading output: %s", err)) + } + }() + + // run command + if err := cmd.Run(); err != nil { + return fmt.Errorf("error running command: %w", err) + } + return nil +} + +// getTestCmd returns a prepared cmd for running the specified tests(s) +func getTestCmd(intTestDir string, testStage string, testName string, relTestPkg string, setupVars map[string]string) (*exec.Cmd, error) { + + // pass all current env vars to test command + env := os.Environ() + // set test stage env var if specified + if testStage != "" { + env = append(env, fmt.Sprintf("%s=%s", testStageEnvVarKey, testStage)) + } + // Load the env with any setup-vars specified + for k, v := range setupVars { + env = append(env, fmt.Sprintf("%s%s=%s", setupEnvVarPrefix, k, v)) + } + + // determine binary and args used for test execution + testArgs := append([]string{relTestPkg}, allTestArgs...) + if testName != allTests { + testArgs = append([]string{relTestPkg, "-run", testName}, allTestArgs...) + } + cmdBin := goBin + if utils.BinaryInPath(gotestBin) != nil { + testArgs = append([]string{"test"}, testArgs...) + } else { + cmdBin = gotestBin + // CI=true enables color op for non tty exec output + env = append(env, "CI=true") + } + // verbose test output if global verbose flag is passed + if viper.GetBool("verbose") { + testArgs = append(testArgs, "-v") + } + // prepare cmd + cmd := exec.Command(cmdBin, testArgs...) 
+ cmd.Env = env + cmd.Dir = intTestDir + return cmd, nil +} diff --git a/cli/bptest/run_test.go b/cli/bptest/run_test.go new file mode 100644 index 00000000000..15bf60ce409 --- /dev/null +++ b/cli/bptest/run_test.go @@ -0,0 +1,126 @@ +package bptest + +import ( + "fmt" + "path" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsValidTestName(t *testing.T) { + tests := []struct { + name string + intTestDir string + testName string + relTestPkg string + errMsg string + }{ + { + name: "valid explicit", + testName: "TestBar", + relTestPkg: "./bar", + }, + { + name: "valid discovered", + testName: "TestAll/examples/baz", + relTestPkg: "./.", + }, + { + name: "valid all regex", + testName: "Test.*", + relTestPkg: "./...", + }, + { + name: "all", + testName: "all", + relTestPkg: "./...", + }, + { + name: "invalid", + testName: "TestBaz", + relTestPkg: "", + errMsg: "unable to find TestBaz- one of [\"TestAll/examples/baz\" \"TestAll/fixtures/qux\" \"TestBar\" \"TestFoo\" \"all\"]", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + if tt.intTestDir == "" { + tt.intTestDir = path.Join(testDirWithDiscovery, intTestDir) + } + relTestPkg, err := validateAndGetRelativeTestPkg(tt.intTestDir, tt.testName) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.Equal(tt.relTestPkg, relTestPkg) + assert.NoError(err) + } + }) + } +} + +func TestGetTestCmd(t *testing.T) { + tests := []struct { + name string + intTestDir string + testStage string + testName string + relTestPkg string + setupVars map[string]string + wantArgs []string + wantEnv []string + errMsg string + }{ + { + name: "single test", + testName: "TestFoo", + relTestPkg: "foo", + wantArgs: []string{"foo", "-run", "TestFoo", "-p", "1", "-count", "1", "-timeout", "0"}, + }, + { + name: "all tests", + testName: "all", + wantArgs: []string{"./...", "-p", "1", "-count", "1", "-timeout", "0"}, + }, + { + 
name: "custom stage", + testName: "TestFoo", + testStage: "init", + wantArgs: []string{"./...", "-run", "TestFoo", "-p", "1", "-count", "1", "-timeout", "0"}, + wantEnv: []string{"RUN_STAGE=init"}, + }, + { + name: "setup vars", + testName: "TestFoo", + testStage: "verify", + setupVars: map[string]string{"my-key": "my-value"}, + wantArgs: []string{"./...", "-run", "TestFoo", "-p", "1", "-count", "1", "-timeout", "0"}, + wantEnv: []string{"RUN_STAGE=verify", "CFT_SETUP_my-key=my-value"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + if tt.intTestDir == "" { + tt.intTestDir = path.Join(testDirWithDiscovery, intTestDir) + } + if tt.relTestPkg == "" { + tt.relTestPkg = "./..." + } + gotCmd, err := getTestCmd(tt.intTestDir, tt.testStage, tt.testName, tt.relTestPkg, tt.setupVars) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + assert.Subset(gotCmd.Args, tt.wantArgs) + if tt.testStage != "" { + assert.Contains(gotCmd.Env, fmt.Sprintf("RUN_STAGE=%s", tt.testStage)) + } + } + assert.Subset(gotCmd.Env, tt.wantEnv) + }) + } +} diff --git a/cli/bptest/stages.go b/cli/bptest/stages.go new file mode 100644 index 00000000000..ea57e60786d --- /dev/null +++ b/cli/bptest/stages.go @@ -0,0 +1,32 @@ +package bptest + +import "fmt" + +var stages = []string{"init", "plan", "apply", "verify", "teardown"} + +var stagesWithAlias = map[string][]string{ + stages[0]: {"create"}, + stages[1]: {}, + stages[2]: {"converge"}, + stages[3]: {}, + stages[4]: {"destroy"}, +} + +// validateAndGetStage validates given stage and resolves to stage name if an alias is provided +func validateAndGetStage(s string) (string, error) { + // empty stage is a special case for running all stages + if s == "" { + return "", nil + } + for stageName, aliases := range stagesWithAlias { + if stageName == s { + return stageName, nil + } + for _, alias := range aliases { + if alias == s { + 
return stageName, nil + } + } + } + return "", fmt.Errorf("invalid stage name %s - one of %+q expected", s, stages) +} diff --git a/cli/bptest/stages_test.go b/cli/bptest/stages_test.go new file mode 100644 index 00000000000..d870663b621 --- /dev/null +++ b/cli/bptest/stages_test.go @@ -0,0 +1,56 @@ +package bptest + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidateAndGetStage(t *testing.T) { + tests := []struct { + name string + stage string + want string + errMsg string + }{ + { + name: "valid name", + stage: "init", + want: "init", + }, + { + name: "alias name", + stage: "create", + want: "init", + }, + { + name: "valid name no alias", + stage: "verify", + want: "verify", + }, + { + name: "invalid name", + stage: "foo", + errMsg: fmt.Sprintf("invalid stage name foo - one of %+q expected", stages), + }, + { + name: "empty (all stages)", + stage: "", + want: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + got, err := validateAndGetStage(tt.stage) + if tt.errMsg != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.errMsg) + } else { + assert.NoError(err) + assert.Equal(tt.want, got) + } + }) + } +} diff --git a/cli/bptest/table.go b/cli/bptest/table.go new file mode 100644 index 00000000000..91c68204357 --- /dev/null +++ b/cli/bptest/table.go @@ -0,0 +1,21 @@ +package bptest + +import ( + "os" + + "github.com/jedib0t/go-pretty/v6/table" + "github.com/jedib0t/go-pretty/v6/text" +) + +func newTable() table.Writer { + tw := table.NewWriter() + tw.Style().Color.Header = text.Colors{text.FgGreen} + tw.SetColumnConfigs( + []table.ColumnConfig{ + {Number: 1, Colors: text.Colors{text.FgYellow}}, + }, + ) + tw.Style().Options.DrawBorder = false + tw.SetOutputMirror(os.Stdout) + return tw +} diff --git a/cli/bptest/templates/blueprint_test.go.tmpl b/cli/bptest/templates/blueprint_test.go.tmpl new file mode 100644 index 00000000000..157c681813b --- /dev/null +++ 
b/cli/bptest/templates/blueprint_test.go.tmpl @@ -0,0 +1,25 @@ +package {{.PkgName}} + +import ( + "fmt" + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/stretchr/testify/assert" +) + +func {{.FnName}}(t *testing.T) { + bpt := tft.NewTFBlueprintTest(t) + + bpt.DefineVerify(func(assert *assert.Assertions) { + bpt.DefaultVerify(assert) + {{range .Inputs}} + {{toLowerCamel .}} := bpt.GetStringOutput("{{.}}"){{end}} + + op := gcloud.Run(t,"") + assert.Contains(op.Get("result").String(), "foo", "contains foo") + }) + + bpt.Test() +} diff --git a/cli/bptest/templates/discover_test.go.tmpl b/cli/bptest/templates/discover_test.go.tmpl new file mode 100644 index 00000000000..042bb992e75 --- /dev/null +++ b/cli/bptest/templates/discover_test.go.tmpl @@ -0,0 +1,11 @@ +package test + +import ( + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" +) + +func TestAll(t *testing.T) { + tft.AutoDiscoverAndTest(t) +} diff --git a/cli/bptest/templates/go.mod.tmpl b/cli/bptest/templates/go.mod.tmpl new file mode 100644 index 00000000000..cf05bd512ea --- /dev/null +++ b/cli/bptest/templates/go.mod.tmpl @@ -0,0 +1,8 @@ +module github.com/terraform-google-modules/%s/test/integration + +go 1.16 + +require ( + github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test v0.4.0 + github.com/stretchr/testify v1.8.1 +) diff --git a/cli/bptest/testdata/cb/newAll.yaml b/cli/bptest/testdata/cb/newAll.yaml new file mode 100644 index 00000000000..95280219ebc --- /dev/null +++ b/cli/bptest/testdata/cb/newAll.yaml @@ -0,0 +1,41 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +timeout: 3600s +steps: +- id: prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && prepare_environment'] + env: + - 'TF_VAR_org_id=$_ORG_ID' + - 'TF_VAR_folder_id=$_FOLDER_ID' + - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' +- id: create + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run all --stage init --verbose'] +- id: converge + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run all --stage apply --verbose'] +- id: verify + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run all --stage verify --verbose'] +- id: destroy + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run all --stage teardown --verbose'] +tags: +- 'ci' +- 'integration' +substitutions: + _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0' \ No newline at end of file diff --git a/cli/bptest/testdata/cb/newTarget.yaml b/cli/bptest/testdata/cb/newTarget.yaml new file mode 100644 index 00000000000..245115fd9ec --- /dev/null +++ b/cli/bptest/testdata/cb/newTarget.yaml @@ -0,0 +1,369 @@ +# Copyright 2019 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +timeout: 12600s +steps: +- id: download acm + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && download_acm'] +- id: prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && prepare_environment && chmod 600 /builder/home/.netrc && sleep 120'] + env: + - 'TF_VAR_org_id=$_ORG_ID' + - 'TF_VAR_folder_id=$_FOLDER_ID' + - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' +- id: create all + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run all --stage init --verbose'] +- id: converge disable-client-cert-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage apply --verbose'] +- id: verify disable-client-cert-local + waitFor: + - converge disable-client-cert-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage verify --verbose'] +- id: destroy disable-client-cert-local + waitFor: 
+ - verify disable-client-cert-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage teardown --verbose'] +- id: converge shared-vpc-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSharedVpc --stage apply --verbose'] +- id: verify shared-vpc-local + waitFor: + - converge shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSharedVpc --stage verify --verbose'] +- id: destroy shared-vpc-local + waitFor: + - verify shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSharedVpc --stage teardown --verbose'] +- id: converge safer-cluster-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage apply --verbose'] +- id: verify safer-cluster-local + waitFor: + - converge safer-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage verify --verbose'] +- id: destroy safer-cluster-local + waitFor: + - verify safer-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage teardown --verbose'] +- id: converge simple-regional-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', 
'-c', 'cft test run TestSimpleRegional --stage apply --verbose'] +- id: verify simple-regional-local + waitFor: + - converge simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage verify --verbose'] +- id: destroy simple-regional-local + waitFor: + - verify simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage teardown --verbose'] +- id: converge simple-regional-private-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage apply --verbose'] +- id: verify simple-regional-private-local + waitFor: + - converge simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage verify --verbose'] +- id: destroy simple-regional-private-local + waitFor: + - verify simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage teardown --verbose'] +- id: converge simple-regional-with-kubeconfig-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeconfig --stage apply --verbose'] +- id: verify simple-regional-with-kubeconfig-local + waitFor: + - converge simple-regional-with-kubeconfig-local + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeconfig --stage verify --verbose'] +- id: destroy simple-regional-with-kubeconfig-local + waitFor: + - verify simple-regional-with-kubeconfig-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeconfig --stage teardown --verbose'] +- id: converge simple-regional-with-networking-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage apply --verbose'] +- id: verify simple-regional-with-networking-local + waitFor: + - converge simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage verify --verbose'] +- id: destroy simple-regional-with-networking-local + waitFor: + - verify simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage teardown --verbose'] +- id: converge simple-zonal-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage apply --verbose'] +- id: verify simple-zonal-local + waitFor: + - converge simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage verify --verbose'] +- id: destroy 
simple-zonal-local + waitFor: + - verify simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage teardown --verbose'] +- id: converge simple-zonal-private-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage apply --verbose'] +- id: verify simple-zonal-private-local + waitFor: + - converge simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage verify --verbose'] +- id: destroy simple-zonal-private-local + waitFor: + - verify simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage teardown --verbose'] +- id: converge stub-domains-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestStubDomains --stage apply --verbose'] +- id: verify stub-domains-local + waitFor: + - converge stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestStubDomains --stage verify --verbose'] +- id: destroy stub-domains-local + waitFor: + - verify stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestStubDomains --stage teardown --verbose'] +- id: converge upstream-nameservers-local + waitFor: + - create all + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestUpstreamNameservers --stage apply --verbose'] +- id: verify upstream-nameservers-local + waitFor: + - converge upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestUpstreamNameservers --stage verify --verbose'] +- id: destroy upstream-nameservers-local + waitFor: + - verify upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestUpstreamNameservers --stage teardown --verbose'] +- id: converge stub-domains-upstream-nameservers-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestStubDomainsUpstreamNameservers --stage apply --verbose'] +- id: verify stub-domains-upstream-nameservers-local + waitFor: + - converge stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestStubDomainsUpstreamNameservers --stage verify --verbose'] +- id: destroy stub-domains-upstream-nameservers-local + waitFor: + - verify stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestStubDomainsUpstreamNameservers --stage teardown --verbose'] +- id: converge workload-metadata-config-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestWorkloadMetadataConfig --stage apply 
--verbose'] +- id: verify workload-metadata-config-local + waitFor: + - converge workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestWorkloadMetadataConfig --stage verify --verbose'] +- id: destroy workload-metadata-config-local + waitFor: + - verify workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestWorkloadMetadataConfig --stage teardown --verbose'] +- id: converge beta-cluster-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage apply --verbose'] +- id: verify beta-cluster-local + waitFor: + - converge beta-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage verify --verbose'] +- id: destroy beta-cluster-local + waitFor: + - verify beta-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage teardown --verbose'] +- id: converge deploy-service-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage apply --verbose'] +- id: verify deploy-service-local + waitFor: + - converge deploy-service-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage verify --verbose'] +- id: destroy deploy-service-local + waitFor: + - verify 
deploy-service-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage teardown --verbose'] +- id: converge node-pool-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage apply --verbose'] +- id: verify node-pool-local + waitFor: + - converge node-pool-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage verify --verbose'] +- id: destroy node-pool-local + waitFor: + - verify node-pool-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage teardown --verbose'] +- id: converge sandbox-enabled-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSandboxEnabled --stage apply --verbose'] +- id: verify sandbox-enabled-local + waitFor: + - converge sandbox-enabled-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSandboxEnabled --stage verify --verbose'] +- id: destroy sandbox-enabled-local + waitFor: + - verify sandbox-enabled-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSandboxEnabled --stage teardown --verbose'] +- id: converge workload-identity-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 
'cft test run TestWorkloadIdentity --stage apply --verbose'] +- id: verify workload-identity-local + waitFor: + - converge workload-identity-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestWorkloadIdentity --stage verify --verbose'] +- id: destroy workload-identity-local + waitFor: + - verify workload-identity-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestWorkloadIdentity --stage teardown --verbose'] +- id: converge safer-cluster-iap-bastion-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferClusterIapBastion --stage apply --verbose'] +- id: verify safer-cluster-iap-bastion-local + waitFor: + - converge safer-cluster-iap-bastion-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferClusterIapBastion --stage verify --verbose'] +- id: destroy safer-cluster-iap-bastion-local + waitFor: + - verify safer-cluster-iap-bastion-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferClusterIapBastion --stage teardown --verbose'] +- id: converge simple-zonal-with-asm-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalWithAsm --stage apply --verbose'] +- id: verify simple-zonal-with-asm-local + waitFor: + - converge simple-zonal-with-asm-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: 
['/bin/bash', '-c', 'cft test run TestSimpleZonalWithAsm --stage verify --verbose'] +- id: destroy simple-zonal-with-asm-local + waitFor: + - verify simple-zonal-with-asm-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalWithAsm --stage teardown --verbose'] +- id: converge simple-autopilot-private-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleAutopilotPrivate --stage apply --verbose'] +- id: verify simple-autopilot-private-local + waitFor: + - converge simple-autopilot-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleAutopilotPrivate --stage verify --verbose'] +- id: destroy simple-autopilot-private-local + waitFor: + - verify simple-autopilot-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleAutopilotPrivate --stage teardown --verbose'] +- id: converge simple-autopilot-public-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleAutopilotPublic --stage apply --verbose'] +- id: verify simple-autopilot-public-local + waitFor: + - converge simple-autopilot-public-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleAutopilotPublic --stage verify --verbose'] +- id: destroy simple-autopilot-public-local + waitFor: + - verify simple-autopilot-public-local + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleAutopilotPublic --stage teardown --verbose'] +tags: +- 'ci' +- 'integration' +substitutions: + _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '1.0' +options: + machineType: 'N1_HIGHCPU_8' \ No newline at end of file diff --git a/cli/bptest/testdata/cb/oldAll.yaml b/cli/bptest/testdata/cb/oldAll.yaml new file mode 100644 index 00000000000..b69ff93d8c4 --- /dev/null +++ b/cli/bptest/testdata/cb/oldAll.yaml @@ -0,0 +1,41 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +timeout: 3600s +steps: +- id: prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && prepare_environment'] + env: + - 'TF_VAR_org_id=$_ORG_ID' + - 'TF_VAR_folder_id=$_FOLDER_ID' + - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' +- id: create + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create'] +- id: converge + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge'] +- id: verify + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify'] +- id: destroy + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy'] +tags: +- 'ci' +- 'integration' +substitutions: + _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0' \ No newline at end of file diff --git a/cli/bptest/testdata/cb/oldTarget.yaml b/cli/bptest/testdata/cb/oldTarget.yaml new file mode 100644 index 00000000000..7bbe4bba1ab --- /dev/null +++ b/cli/bptest/testdata/cb/oldTarget.yaml @@ -0,0 +1,369 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +timeout: 12600s +steps: +- id: download acm + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && download_acm'] +- id: prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && prepare_environment && chmod 600 /builder/home/.netrc && sleep 120'] + env: + - 'TF_VAR_org_id=$_ORG_ID' + - 'TF_VAR_folder_id=$_FOLDER_ID' + - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' +- id: create all + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create'] +- id: converge disable-client-cert-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge disable-client-cert-local'] +- id: verify disable-client-cert-local + waitFor: + - converge disable-client-cert-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify disable-client-cert-local'] +- id: destroy disable-client-cert-local + waitFor: + - verify 
disable-client-cert-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy disable-client-cert-local'] +- id: converge shared-vpc-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge shared-vpc-local'] +- id: verify shared-vpc-local + waitFor: + - converge shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify shared-vpc-local'] +- id: destroy shared-vpc-local + waitFor: + - verify shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy shared-vpc-local'] +- id: converge safer-cluster-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge safer-cluster-local'] +- id: verify safer-cluster-local + waitFor: + - converge safer-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify safer-cluster-local'] +- id: destroy safer-cluster-local + waitFor: + - verify safer-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && 
kitchen_do destroy safer-cluster-local'] +- id: converge simple-regional-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-local'] +- id: verify simple-regional-local + waitFor: + - converge simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-local'] +- id: destroy simple-regional-local + waitFor: + - verify simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-local'] +- id: converge simple-regional-private-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-private-local'] +- id: verify simple-regional-private-local + waitFor: + - converge simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-private-local'] +- id: destroy simple-regional-private-local + waitFor: + - verify simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-private-local'] +- id: converge simple-regional-with-kubeconfig-local + waitFor: + 
- create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-with-kubeconfig-local'] +- id: verify simple-regional-with-kubeconfig-local + waitFor: + - converge simple-regional-with-kubeconfig-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-with-kubeconfig-local'] +- id: destroy simple-regional-with-kubeconfig-local + waitFor: + - verify simple-regional-with-kubeconfig-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-with-kubeconfig-local'] +- id: converge simple-regional-with-networking-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-with-networking-local'] +- id: verify simple-regional-with-networking-local + waitFor: + - converge simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-with-networking-local'] +- id: destroy simple-regional-with-networking-local + waitFor: + - verify simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy 
simple-regional-with-networking-local'] +- id: converge simple-zonal-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-zonal-local'] +- id: verify simple-zonal-local + waitFor: + - converge simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-local'] +- id: destroy simple-zonal-local + waitFor: + - verify simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-local'] +- id: converge simple-zonal-private-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-zonal-private-local'] +- id: verify simple-zonal-private-local + waitFor: + - converge simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-private-local'] +- id: destroy simple-zonal-private-local + waitFor: + - verify simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-private-local'] +- id: converge stub-domains-local + waitFor: + - create all + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-local'] +- id: verify stub-domains-local + waitFor: + - converge stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-local'] +- id: destroy stub-domains-local + waitFor: + - verify stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-local'] +- id: converge upstream-nameservers-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge upstream-nameservers-local'] +- id: verify upstream-nameservers-local + waitFor: + - converge upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify upstream-nameservers-local'] +- id: destroy upstream-nameservers-local + waitFor: + - verify upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy upstream-nameservers-local'] +- id: converge stub-domains-upstream-nameservers-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source 
/usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-upstream-nameservers-local'] +- id: verify stub-domains-upstream-nameservers-local + waitFor: + - converge stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-upstream-nameservers-local'] +- id: destroy stub-domains-upstream-nameservers-local + waitFor: + - verify stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-upstream-nameservers-local'] +- id: converge workload-metadata-config-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge workload-metadata-config-local'] +- id: verify workload-metadata-config-local + waitFor: + - converge workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify workload-metadata-config-local'] +- id: destroy workload-metadata-config-local + waitFor: + - verify workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy workload-metadata-config-local'] +- id: converge beta-cluster-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: 
['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge beta-cluster-local'] +- id: verify beta-cluster-local + waitFor: + - converge beta-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify beta-cluster-local'] +- id: destroy beta-cluster-local + waitFor: + - verify beta-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy beta-cluster-local'] +- id: converge deploy-service-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge deploy-service-local'] +- id: verify deploy-service-local + waitFor: + - converge deploy-service-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify deploy-service-local'] +- id: destroy deploy-service-local + waitFor: + - verify deploy-service-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy deploy-service-local'] +- id: converge node-pool-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge node-pool-local'] +- id: verify node-pool-local + waitFor: + - converge node-pool-local + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify node-pool-local'] +- id: destroy node-pool-local + waitFor: + - verify node-pool-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy node-pool-local'] +- id: converge sandbox-enabled-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge sandbox-enabled-local'] +- id: verify sandbox-enabled-local + waitFor: + - converge sandbox-enabled-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify sandbox-enabled-local'] +- id: destroy sandbox-enabled-local + waitFor: + - verify sandbox-enabled-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy sandbox-enabled-local'] +- id: converge workload-identity-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge workload-identity-local'] +- id: verify workload-identity-local + waitFor: + - converge workload-identity-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && 
kitchen_do verify workload-identity-local'] +- id: destroy workload-identity-local + waitFor: + - verify workload-identity-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy workload-identity-local'] +- id: converge safer-cluster-iap-bastion-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge safer-cluster-iap-bastion-local'] +- id: verify safer-cluster-iap-bastion-local + waitFor: + - converge safer-cluster-iap-bastion-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify safer-cluster-iap-bastion-local'] +- id: destroy safer-cluster-iap-bastion-local + waitFor: + - verify safer-cluster-iap-bastion-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy safer-cluster-iap-bastion-local'] +- id: converge simple-zonal-with-asm-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-zonal-with-asm-local'] +- id: verify simple-zonal-with-asm-local + waitFor: + - converge simple-zonal-with-asm-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-with-asm-local'] +- id: 
destroy simple-zonal-with-asm-local + waitFor: + - verify simple-zonal-with-asm-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-with-asm-local'] +- id: converge simple-autopilot-private-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-autopilot-private-local'] +- id: verify simple-autopilot-private-local + waitFor: + - converge simple-autopilot-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-autopilot-private-local'] +- id: destroy simple-autopilot-private-local + waitFor: + - verify simple-autopilot-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-autopilot-private-local'] +- id: converge simple-autopilot-public-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-autopilot-public-local'] +- id: verify simple-autopilot-public-local + waitFor: + - converge simple-autopilot-public-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-autopilot-public-local'] +- id: destroy simple-autopilot-public-local + 
waitFor: + - verify simple-autopilot-public-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-autopilot-public-local'] +tags: +- 'ci' +- 'integration' +substitutions: + _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '1.0' +options: + machineType: 'N1_HIGHCPU_8' \ No newline at end of file diff --git a/cli/bptest/testdata/init/examples/foo/main.tf b/cli/bptest/testdata/init/examples/foo/main.tf new file mode 100644 index 00000000000..17d5d1f2146 --- /dev/null +++ b/cli/bptest/testdata/init/examples/foo/main.tf @@ -0,0 +1,3 @@ +output "foo" { + value = "foo" +} diff --git a/cli/bptest/testdata/init/test/integration/bar/bar_test.go b/cli/bptest/testdata/init/test/integration/bar/bar_test.go new file mode 100644 index 00000000000..43a64da7184 --- /dev/null +++ b/cli/bptest/testdata/init/test/integration/bar/bar_test.go @@ -0,0 +1 @@ +package bptest diff --git a/cli/bptest/testdata/kitchen-tests/simple-example/controls/gcloud.rb b/cli/bptest/testdata/kitchen-tests/simple-example/controls/gcloud.rb new file mode 100644 index 00000000000..8998e4fbd0b --- /dev/null +++ b/cli/bptest/testdata/kitchen-tests/simple-example/controls/gcloud.rb @@ -0,0 +1 @@ +# ruby test \ No newline at end of file diff --git a/cli/bptest/testdata/kitchen-tests/simple-example/inspec.yml b/cli/bptest/testdata/kitchen-tests/simple-example/inspec.yml new file mode 100644 index 00000000000..bde8d6cec66 --- /dev/null +++ b/cli/bptest/testdata/kitchen-tests/simple-example/inspec.yml @@ -0,0 +1,50 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: simple-example +depends: + - name: inspec-gcp + git: https://github.com/inspec/inspec-gcp.git + tag: v1.8.0 +attributes: + - name: project_id + required: true + type: string + - name: location + required: true + type: string + - name: cluster_name + required: true + type: string + - name: master_kubernetes_version + required: true + type: string + - name: kubernetes_endpoint + required: true + type: string + - name: client_token + required: true + type: string + - name: service_account + required: true + type: string + - name: service_account + required: true + type: string + - name: database_encryption_key_name + required: true + type: string + - name: identity_namespace + required: true + type: string \ No newline at end of file diff --git a/dm/tests/templates/__init__.py b/cli/bptest/testdata/with-discovery/examples/bar/.gitkeep similarity index 100% rename from dm/tests/templates/__init__.py rename to cli/bptest/testdata/with-discovery/examples/bar/.gitkeep diff --git a/dm/tests/unit/__init__.py b/cli/bptest/testdata/with-discovery/examples/baz/.gitkeep similarity index 100% rename from dm/tests/unit/__init__.py rename to cli/bptest/testdata/with-discovery/examples/baz/.gitkeep diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-simple/Readme.md b/cli/bptest/testdata/with-discovery/examples/foo/.gitkeep similarity index 100% rename from infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-simple/Readme.md rename to 
cli/bptest/testdata/with-discovery/examples/foo/.gitkeep diff --git a/cli/bptest/testdata/with-discovery/examples/quux/test.yaml b/cli/bptest/testdata/with-discovery/examples/quux/test.yaml new file mode 100644 index 00000000000..3345e7e20df --- /dev/null +++ b/cli/bptest/testdata/with-discovery/examples/quux/test.yaml @@ -0,0 +1,20 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintTest +metadata: + name: quux +spec: + skip: true diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-simple/examples/main.tf b/cli/bptest/testdata/with-discovery/test/fixtures/.gitkeep similarity index 100% rename from infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-simple/examples/main.tf rename to cli/bptest/testdata/with-discovery/test/fixtures/.gitkeep diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/docs/Readme.md b/cli/bptest/testdata/with-discovery/test/fixtures/foo/.gitkeep similarity index 100% rename from infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/docs/Readme.md rename to cli/bptest/testdata/with-discovery/test/fixtures/foo/.gitkeep diff --git a/cli/bptest/testdata/with-discovery/test/fixtures/quuz/test.yaml b/cli/bptest/testdata/with-discovery/test/fixtures/quuz/test.yaml new file 
mode 100644 index 00000000000..1d3e17b7010 --- /dev/null +++ b/cli/bptest/testdata/with-discovery/test/fixtures/quuz/test.yaml @@ -0,0 +1,20 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintTest +metadata: + name: quuz +spec: + skip: true diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/main.tf b/cli/bptest/testdata/with-discovery/test/fixtures/qux/.gitkeep similarity index 100% rename from infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/main.tf rename to cli/bptest/testdata/with-discovery/test/fixtures/qux/.gitkeep diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/modules/bar-module/main.tf b/cli/bptest/testdata/with-discovery/test/integration/.gitkeep similarity index 100% rename from infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/modules/bar-module/main.tf rename to cli/bptest/testdata/with-discovery/test/integration/.gitkeep diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/modules/foo-module/main.tf b/cli/bptest/testdata/with-discovery/test/integration/bar/.gitkeep similarity index 100% rename from 
infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/modules/foo-module/main.tf rename to cli/bptest/testdata/with-discovery/test/integration/bar/.gitkeep diff --git a/cli/bptest/testdata/with-discovery/test/integration/bar/bar_test.go b/cli/bptest/testdata/with-discovery/test/integration/bar/bar_test.go new file mode 100644 index 00000000000..1252f0eb52e --- /dev/null +++ b/cli/bptest/testdata/with-discovery/test/integration/bar/bar_test.go @@ -0,0 +1,7 @@ +package bar + +import "testing" + +func TestBar(t *testing.T) { + t.Log("Ran test") +} diff --git a/cli/bptest/testdata/with-discovery/test/integration/discover_test.go b/cli/bptest/testdata/with-discovery/test/integration/discover_test.go new file mode 100644 index 00000000000..4c94a12eb16 --- /dev/null +++ b/cli/bptest/testdata/with-discovery/test/integration/discover_test.go @@ -0,0 +1,9 @@ +package test + +import ( + "testing" +) + +func TestAll(t *testing.T) { + +} diff --git a/cli/bptest/testdata/with-discovery/test/integration/foo/.gitkeep b/cli/bptest/testdata/with-discovery/test/integration/foo/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/bptest/testdata/with-discovery/test/integration/foo/foo_test.go b/cli/bptest/testdata/with-discovery/test/integration/foo/foo_test.go new file mode 100644 index 00000000000..2f9cfd81986 --- /dev/null +++ b/cli/bptest/testdata/with-discovery/test/integration/foo/foo_test.go @@ -0,0 +1,7 @@ +package foo + +import "testing" + +func TestFoo(t *testing.T) { + t.Log("Ran test") +} diff --git a/cli/bptest/testdata/with-discovery/test/integration/quuz/quuz_test.go b/cli/bptest/testdata/with-discovery/test/integration/quuz/quuz_test.go new file mode 100644 index 00000000000..cc5085568ba --- /dev/null +++ b/cli/bptest/testdata/with-discovery/test/integration/quuz/quuz_test.go @@ -0,0 +1,7 @@ +package quuz + +import "testing" + +func TestQuuz(t *testing.T) { + t.Log("Ran test") +} diff --git 
a/cli/cmd/blueprint.go b/cli/cmd/blueprint.go new file mode 100644 index 00000000000..dbd22688ee5 --- /dev/null +++ b/cli/cmd/blueprint.go @@ -0,0 +1,30 @@ +package cmd + +import ( + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpbuild" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpcatalog" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpmetadata" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bptest" + "github.com/spf13/cobra" +) + +func init() { + blueprintCmd.AddCommand(bpmetadata.Cmd) + blueprintCmd.AddCommand(bpbuild.Cmd) + blueprintCmd.AddCommand(bptest.Cmd) + blueprintCmd.AddCommand(bpcatalog.Cmd) + + rootCmd.AddCommand(blueprintCmd) +} + +var blueprintCmd = &cobra.Command{ + Use: "blueprint", + Short: "Blueprint CLI", + Long: `The CFT blueprint CLI is used to execute commands specific to blueprints such as test, builds & metadata`, + Args: cobra.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + if len(args) == 0 { + cmd.HelpFunc()(cmd, args) + } + }, +} diff --git a/cli/cmd/launchpad.go b/cli/cmd/launchpad.go index 1754ea3c9ff..193264f8b26 100644 --- a/cli/cmd/launchpad.go +++ b/cli/cmd/launchpad.go @@ -25,7 +25,7 @@ var launchpadCmd = &cobra.Command{ Cloud Foundation Ecosystem Convention. 
Taking YAML and generate opinionated infrastructure resources ready to be deployed in Infrastructure as Code style`, Run: func(cmd *cobra.Command, args []string) { - if args == nil || len(args) == 0 { + if len(args) == 0 { cmd.HelpFunc()(cmd, args) } }, @@ -38,7 +38,7 @@ var launchpadGenerateCmd = &cobra.Command{ Long: `Generate infrastructure foundation via defined YAML`, Args: cobra.MinimumNArgs(1), Run: func(cmd *cobra.Command, args []string) { - if args == nil || len(args) == 0 { + if len(args) == 0 { cmd.HelpFunc()(cmd, args) } else { launchpad.NewGenerate(args, launchpad.NewOutputFlavor(launchpadOutputFlavor), launchpadOutputDirectory) diff --git a/cli/cmd/root.go b/cli/cmd/root.go index b394398eca5..b97b8507639 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -3,10 +3,13 @@ package cmd import ( "os" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bpbuild" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/bptest" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/report" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli/scorecard" log "github.com/inconshreveable/log15" "github.com/spf13/cobra" + "github.com/spf13/viper" ) // rootCmd represents the base command when called without any subcommands @@ -17,7 +20,7 @@ var rootCmd = &cobra.Command{ Args: cobra.NoArgs, Run: func(cmd *cobra.Command, args []string) { // no params means same as -h flag - if args == nil || len(args) == 0 { + if len(args) == 0 { cmd.HelpFunc()(cmd, args) } }, @@ -25,6 +28,8 @@ var rootCmd = &cobra.Command{ if !flags.verbose { // discard logs scorecard.Log.SetHandler(log.DiscardHandler()) + bptest.Log.SetHandler(log.DiscardHandler()) + bpbuild.Log.SetHandler(log.DiscardHandler()) } // We want to dump to stdout by default cmd.SetOut(cmd.OutOrStdout()) @@ -66,9 +71,15 @@ Use "{{.CommandPath}} [command] --help" for more information about a command.{{e } rootCmd.PersistentFlags().BoolVar(&flags.verbose, "verbose", false, "Log output to 
stdout") + err := viper.BindPFlag("verbose", rootCmd.PersistentFlags().Lookup("verbose")) + if err != nil { + panic(err) + } rootCmd.AddCommand(scorecard.Cmd) rootCmd.AddCommand(report.Cmd) + rootCmd.AddCommand(bptest.Cmd) + rootCmd.AddCommand(bpbuild.Cmd) } func Execute() { diff --git a/cli/cmd/version_test.go b/cli/cmd/version_test.go index 2d584afc63b..b509c448fe7 100644 --- a/cli/cmd/version_test.go +++ b/cli/cmd/version_test.go @@ -22,7 +22,6 @@ func TestVersionCommand(t *testing.T) { } func TestVersionCommandHelp(t *testing.T) { - output, err := ExecuteCommand(rootCmd, "version", "-h") if !strings.HasPrefix(output, versionCmd.Long) { t.Errorf("Unexpected output: %v", output) diff --git a/cli/docs/scorecard.md b/cli/docs/scorecard.md index 7669f618102..f2046b4679f 100644 --- a/cli/docs/scorecard.md +++ b/cli/docs/scorecard.md @@ -74,16 +74,16 @@ The user guide in rest of this document provides examples for Linux and OS X env You can also use --refresh flag to create or overwrite CAI export files in GCS bucket and perform analysis, within one step. ``` -# Running Cloud Asset Inventory API via Cloud SDK requires a service account and does not support end user credentials. +# Running Cloud Asset Inventory API via Cloud SDK requires a service account and does not support end user credentials. 
# Configure Application Default Credential to use a service account key if running outside GCP -# The service account needs be created in a Cloud Asset Inventory enabled project, -# with Cloud Asset Viewer role at target project/folder/org, +# The service account needs be created in a Cloud Asset Inventory enabled project, +# with Cloud Asset Viewer role at target project/folder/org, # and Storage Object Viewer role at $CAI_BUCKET_NAME export GOOGLE_APPLICATION_CREDENTIALS=sa_key.json ./cft scorecard --policy-path ./policy-library \ --bucket=$CAI_BUCKET_NAME \ - --refresh + --refresh ``` ### Using a local export @@ -117,7 +117,7 @@ Print a scorecard of your GCP environment, for resources and IAM policies in Clo Usage: cft scorecard [flags] - + Flags: --bucket string GCS bucket name for storing inventory (conflicts with --dir-path or --stdin) diff --git a/cli/go.mod b/cli/go.mod index f166f411273..83c9a0fa763 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -1,21 +1,239 @@ module github.com/GoogleCloudPlatform/cloud-foundation-toolkit/cli -go 1.16 +go 1.23 + +toolchain go1.23.4 require ( - cloud.google.com/go/asset v1.0.1 - cloud.google.com/go/storage v1.18.2 - github.com/GoogleCloudPlatform/config-validator v0.0.0-20211122204404-f3fd77c5c355 - github.com/briandowns/spinner v1.16.0 - github.com/gammazero/workerpool v1.1.2 - github.com/golang/protobuf v1.5.2 - github.com/google/go-cmp v0.5.6 - github.com/inconshreveable/log15 v0.0.0-20201112154412-8562bdadbbac - github.com/open-policy-agent/opa v0.34.2 + cloud.google.com/go/asset v1.20.3 + cloud.google.com/go/storage v1.49.0 + github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test v0.17.3 + github.com/GoogleCloudPlatform/config-validator v0.0.0-20230824155412-0da46e6a67ad + github.com/briandowns/spinner v1.23.1 + github.com/fatih/color v1.18.0 + github.com/gammazero/workerpool v1.1.3 + github.com/go-git/go-git/v5 v5.12.0 + github.com/gomarkdown/markdown v0.0.0-20241205020045-f7e15b2f3e62 + 
github.com/google/go-cmp v0.6.0 + github.com/google/go-github/v68 v68.0.0 + github.com/gruntwork-io/terratest v0.48.1 + github.com/hashicorp/go-version v1.7.0 + github.com/hashicorp/hcl/v2 v2.23.0 + github.com/hashicorp/terraform-config-inspect v0.0.0-20241129133400-c404f8227ea6 + github.com/hashicorp/terraform-json v0.24.0 + github.com/iancoleman/strcase v0.3.0 + github.com/inconshreveable/log15 v2.16.0+incompatible + github.com/invopop/jsonschema v0.12.0 + github.com/itchyny/json2yaml v0.1.4 + github.com/jedib0t/go-pretty v4.3.0+incompatible + github.com/jedib0t/go-pretty/v6 v6.6.5 + github.com/manifoldco/promptui v0.9.0 + github.com/migueleliasweb/go-github-mock v1.1.0 + github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770 + github.com/open-policy-agent/opa v0.70.0 + github.com/otiai10/copy v1.14.0 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.2.1 - github.com/spf13/viper v1.9.0 - github.com/stretchr/testify v1.7.0 - google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12 - gopkg.in/yaml.v2 v2.4.0 + github.com/spf13/cobra v1.8.1 + github.com/spf13/viper v1.19.0 + github.com/stretchr/testify v1.10.0 + github.com/xeipuuv/gojsonschema v1.2.0 + github.com/zclconf/go-cty v1.15.1 + golang.org/x/oauth2 v0.24.0 + golang.org/x/text v0.21.0 + google.golang.org/api v0.214.0 + google.golang.org/protobuf v1.36.1 + gopkg.in/yaml.v3 v3.0.1 + sigs.k8s.io/yaml v1.4.0 +) + +require ( + cel.dev/expr v0.16.2 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/accesscontextmanager v1.9.2 // indirect + cloud.google.com/go/auth v0.13.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/iam v1.2.2 // indirect + cloud.google.com/go/longrunning v0.6.2 // indirect + cloud.google.com/go/monitoring v1.21.2 // indirect + cloud.google.com/go/orgpolicy v1.14.1 // indirect + cloud.google.com/go/osconfig v1.14.2 // indirect + 
dario.cat/mergo v1.0.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/OneOfOne/xxhash v1.2.8 // indirect + github.com/ProtonMail/go-crypto v1.0.0 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/agnivade/levenshtein v1.2.0 // indirect + github.com/alexflint/go-filemutex v1.3.0 // indirect + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/bahlo/generic-list-go v0.2.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/buger/jsonparser v1.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/envoyproxy/go-control-plane v0.13.1 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify 
v1.8.0 // indirect + github.com/gammazero/deque v0.2.0 // indirect + github.com/go-errors/errors v1.5.0 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/analysis v0.21.2 // indirect + github.com/go-openapi/errors v0.20.2 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/loads v0.21.1 // indirect + github.com/go-openapi/spec v0.20.4 // indirect + github.com/go-openapi/strfmt v0.21.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-openapi/validate v0.21.0 // indirect + github.com/go-stack/stack v1.8.1 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/glog v1.2.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.12.6 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-github/v64 v64.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.14.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-getter/v2 v2.2.3 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-safetemp v1.0.0 // 
indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/jinzhu/copier v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-shellwords v1.0.12 // indirect + github.com/mattn/go-zglob v0.0.4 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/open-policy-agent/frameworks/constraint v0.0.0-20230712214810-96753a21c26f // indirect + github.com/open-policy-agent/gatekeeper/v3 v3.13.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + 
github.com/rivo/uniseg v0.2.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/skeema/knownhosts v1.2.2 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tchap/go-patricia/v2 v2.3.1 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect + github.com/tmccombs/hcl2json v0.6.4 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/yashtewari/glob-intersection v0.2.0 // indirect + go.mongodb.org/mongo-driver v1.8.3 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.31.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + 
go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/time v0.8.0 // indirect + golang.org/x/tools v0.26.0 // indirect + google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/grpc v1.68.1 // indirect + google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/api v0.28.4 // indirect + k8s.io/apiextensions-apiserver v0.27.2 // indirect + k8s.io/apimachinery v0.28.4 // indirect + k8s.io/apiserver v0.27.2 // indirect + k8s.io/client-go v0.28.4 // indirect + k8s.io/component-base v0.27.2 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kubectl v0.27.2 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + sigs.k8s.io/controller-runtime v0.15.1 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect ) diff --git a/cli/go.sum b/cli/go.sum index 
996b73916f6..2e7894e3c22 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -1,1549 +1,761 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= +cel.dev/expr v0.16.2 h1:RwRhoH17VhAu9U5CMvMhH1PDVgf0tuz9FT+24AfMLfU= +cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod 
h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go/asset v1.0.1 h1:itBmxPI2oR9RLN3ADFLyYkdMa/ro1RZ4ySEQkFO5H7Q= -cloud.google.com/go/asset v1.0.1/go.mod h1:666udTyY1NrBD3w0yWiLViXFQ/w3kPQ43lrjY3RbeNI= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= 
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.18.2 h1:5NQw6tOn3eMm0oE8vTkfjau18kjL79FlMjy/CHTpmoY= -cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMjua8Aw4naVM= -contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks 
v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/accesscontextmanager v1.9.2 h1:P0uVixQft8aacbZ7VDZStNZdrftF24Hk8JkA3kfvfqI= +cloud.google.com/go/accesscontextmanager v1.9.2/go.mod h1:T0Sw/PQPyzctnkw1pdmGAKb7XBA84BqQzH0fSU7wzJU= +cloud.google.com/go/asset v1.20.3 h1:/jQBAkZVUbsIczRepDkwaf/K5NcRYvQ6MBiWg5i20fU= +cloud.google.com/go/asset v1.20.3/go.mod h1:797WxTDwdnFAJzbjZ5zc+P5iwqXc13yO9DHhmS6wl+o= +cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= +cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= +cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= +cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= +cloud.google.com/go/longrunning v0.6.2 h1:xjDfh1pQcWPEvnfjZmwjKQEcHnpz6lHjfy7Fo0MK+hc= +cloud.google.com/go/longrunning v0.6.2/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= 
+cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= +cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= +cloud.google.com/go/orgpolicy v1.14.1 h1:c1QLoM5v8/aDKgYVCUaC039lD3GPvqAhTVOwsGhIoZQ= +cloud.google.com/go/orgpolicy v1.14.1/go.mod h1:1z08Hsu1mkoH839X7C8JmnrqOkp2IZRSxiDw7W/Xpg4= +cloud.google.com/go/osconfig v1.14.2 h1:iBN87PQc+EGh5QqijM3CuxcibvDWmF+9k0eOJT27FO4= +cloud.google.com/go/osconfig v1.14.2/go.mod h1:kHtsm0/j8ubyuzGciBsRxFlbWVjc4c7KdrwJw0+g+pQ= +cloud.google.com/go/storage v1.49.0 h1:zenOPBOWHCnojRd9aJZAyQXBYqkJkdQS42dxL55CIMw= +cloud.google.com/go/storage v1.49.0/go.mod h1:k1eHhhpLvrPjVGfo0mOUPEJ4Y2+a/Hv5PiwehZI9qGU= +cloud.google.com/go/trace v1.11.2 h1:4ZmaBdL8Ng/ajrgKqY5jfvzqMXbrDcBsUGXOT9aqTtI= +cloud.google.com/go/trace v1.11.2/go.mod h1:bn7OwXd4pd5rFuAnTrzBuoZ4ax2XQeG3qNgYmfCy0Io= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GoogleCloudPlatform/config-validator v0.0.0-20211122204404-f3fd77c5c355 h1:UQQJmJl5aZNwLISiq/YqEEFXxv042cYY6wD49TOzrco= -github.com/GoogleCloudPlatform/config-validator v0.0.0-20211122204404-f3fd77c5c355/go.mod h1:MHzWJNkQiBhSRuWAOxdMBBR+acz6g+jXGDtZp5S7vzU= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= 
-github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.3/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.7/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test v0.17.3 h1:+lqTQyIdgr9XbI/onTPpwDnKR8I+MFKuIi6jeO8qDDQ= +github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test v0.17.3/go.mod h1:7uX+sVrlOPmrpkfuVSoO9qO0tWc23lVVCIQOL9GM5Qs= +github.com/GoogleCloudPlatform/config-validator v0.0.0-20230824155412-0da46e6a67ad h1:JosY9Jj9NbPZIPxhMt2YAooduf+6OMT5XlStRJ2epaE= +github.com/GoogleCloudPlatform/config-validator v0.0.0-20230824155412-0da46e6a67ad/go.mod h1:pW/8BBAWENpV8WiQELmsRySkvdTgE/+H4jBQxC/Fous= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 
h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= +github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf 
v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v0.0.0-20210308000810-4155955488a0/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= 
+github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= +github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= +github.com/alexflint/go-filemutex v1.3.0 h1:LgE+nTUWnQCyRKbpoceKZsPQbs84LivvgwUymZXdOcM= +github.com/alexflint/go-filemutex v1.3.0/go.mod h1:U0+VA/i30mGBlLCrFPGtTe9y6wGQfNAWPBTekHQ+c8A= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod 
h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/briandowns/spinner v1.16.0 h1:DFmp6hEaIx2QXXuqSJmtfSBSAjRmpGiKG6ip2Wm/yOs= -github.com/briandowns/spinner v1.16.0/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= -github.com/bytecodealliance/wasmtime-go v0.27.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI= -github.com/bytecodealliance/wasmtime-go v0.30.0 h1:WfYpr4WdqInt8m5/HvYinf+HrSEAIhItKIcth+qb1h4= -github.com/bytecodealliance/wasmtime-go v0.30.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= 
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/briandowns/spinner v1.23.1 h1:t5fDPmScwUjozhDj4FA46p5acZWIPXYE30qW2Ptu650= +github.com/briandowns/spinner v1.23.1/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= +github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20210405163807-87a70f3d418b/go.mod 
h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod 
h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= 
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= -github.com/dgraph-io/badger/v3 v3.2103.2 h1:dpyM5eCJAtQCBcMCZcT4UBZchuTJgCywerHHgmxfxM8= -github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= 
-github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= +github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 
h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0 
h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/fullstorydev/grpcurl v1.6.0/go.mod 
h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= -github.com/gammazero/deque v0.1.0 h1:f9LnNmq66VDeuAlSAapemq/U7hJ2jpIWa4c09q8Dlik= -github.com/gammazero/deque v0.1.0/go.mod h1:KQw7vFau1hHuM8xmI9RbgKFbAsQFWmBpqQ2KenFLk6M= -github.com/gammazero/workerpool v1.1.2 h1:vuioDQbgrz4HoaCi2q1HLlOXdpbap5AET7xu5/qj87g= -github.com/gammazero/workerpool v1.1.2/go.mod h1:UelbXcO0zCIGFcufcirHhq2/xtLXJdQ29qZNlXG9OjQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod 
h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gammazero/deque v0.2.0 h1:SkieyNB4bg2/uZZLxvya0Pq6diUlwx7m2TeT7GAIWaA= +github.com/gammazero/deque v0.2.0/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= +github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= +github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= +github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/go-errors/errors v1.5.0 h1:/EuijeGOu7ckFxzhkj4CXJ8JaenxK7bKUxpPYqeLHqQ= +github.com/go-errors/errors v1.5.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 
h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= +github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= 
-github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.4 h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo= 
-github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= +github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/spec 
v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os= +github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.4/go.mod h1:BkJ0ZmXui7yB0bJXWSXgLPNTmbLVeX/3D1xn/N9mMUM= -github.com/go-openapi/validate v0.19.5 h1:QhCBKRYqZR+SKo4gl1lPhPahope8/RLt6EVgY8X80w0= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI= 
+github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= +github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= +github.com/gobuffalo/attrs 
v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= 
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache 
v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf 
v0.0.0-20181025225059-d3de96c4c28e/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy 
v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.40.1/go.mod h1:OyFTr1muxaWeGTcHQcL3B7C4rETnDphTKYenZDgH2/g= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= -github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= 
-github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= +github.com/gomarkdown/markdown v0.0.0-20241205020045-f7e15b2f3e62 h1:pbAFUZisjG4s6sxvRJvf2N7vhpCvx2Oxb3PmS6pDO1g= +github.com/gomarkdown/markdown v0.0.0-20241205020045-f7e15b2f3e62/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA= +github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M= +github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v64 v64.0.0 h1:4G61sozmY3eiPAjjoOHponXDBONm+utovTKbyUb2Qdg= +github.com/google/go-github/v64 v64.0.0/go.mod h1:xB3vqMQNdHzilXBiO2I+M7iEFtHf+DP/omBOv6tQzVo= +github.com/google/go-github/v68 v68.0.0 h1:ZW57zeNZiXTdQ16qrDiZ0k6XucrxZ2CGmoTvcCyQG6s= +github.com/google/go-github/v68 v68.0.0/go.mod h1:K9HAUBovM2sLwM408A18h+wd9vqdLOEqTUCbnRIcx68= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 
h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/trillian v1.3.11/go.mod 
h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20220318212150-b2ab0324ddda h1:KdHPvlgeNEDs8rae032MqFG8LVwcSEivcCjNdVOXRmg= +github.com/google/pprof v0.0.0-20220318212150-b2ab0324ddda/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= -github.com/googleapis/gnostic v0.3.1/go.mod 
h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/gookit/color v1.3.8/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v0.0.0-20181024020800-521ea7b17d02/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1/go.mod 
h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= -github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api 
v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= +github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/gruntwork-io/terratest v0.48.1 h1:pnydDjkWbZCUYXvQkr24y21fBo8PfJC5hRGdwbl1eXM= +github.com/gruntwork-io/terratest v0.48.1/go.mod h1:U2EQW4Odlz75XJUH16Kqkr9c93p+ZZtkpVez7GkZFa4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.12.0/go.mod 
h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter/v2 v2.2.3 h1:6CVzhT0KJQHqd9b0pK3xSP0CM/Cv+bVhk+jcaRJ2pGk= +github.com/hashicorp/go-getter/v2 v2.2.3/go.mod h1:hp5Yy0GMQvwWVUmwLs3ygivz1JSLI323hdIE9J9m7TY= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= 
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/log15 v0.0.0-20201112154412-8562bdadbbac h1:n1DqxAo4oWPMvH1+v+DLYlMCecgumhhgnxAPdqDIFHI= -github.com/inconshreveable/log15 v0.0.0-20201112154412-8562bdadbbac/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= +github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/terraform-config-inspect v0.0.0-20241129133400-c404f8227ea6 h1:146llE+6P/9YO8RcHRehzGNiS9+OoirKW9/aML6/JIA= +github.com/hashicorp/terraform-config-inspect v0.0.0-20241129133400-c404f8227ea6/go.mod h1:Gz/z9Hbn+4KSp8A2FBtNszfLSdT2Tn/uAKGuVqqWmDI= +github.com/hashicorp/terraform-json v0.24.0 h1:rUiyF+x1kYawXeRth6fKFm/MdfBS6+lW4NbeATsYz8Q= +github.com/hashicorp/terraform-json v0.24.0/go.mod h1:Nfj5ubo9xbu9uiAoZVBsNOjvNKB66Oyrvtit74kC7ow= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/log15 v2.16.0+incompatible h1:6nvMKxtGcpgm7q0KiGs+Vc+xDvUXaBqsPKHWKsinccw= 
+github.com/inconshreveable/log15 v2.16.0+incompatible/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jingyugao/rowserrcheck v0.0.0-20210315055705-d907ca737bb1/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 
h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= +github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/itchyny/json2yaml v0.1.4 h1:/pErVOXGG5iTyXHi/QKR4y3uzhLjGTEmmJIy97YT+k8= +github.com/itchyny/json2yaml v0.1.4/go.mod h1:6iudhBZdarpjLFRNj+clWLAkGft+9uCcjAZYXUH9eGI= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jedib0t/go-pretty 
v4.3.0+incompatible h1:CGs8AVhEKg/n9YbUenWmNStRW2PHJzaeDodcfvRAbIo= +github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag= +github.com/jedib0t/go-pretty/v6 v6.6.5 h1:9PgMJOVBedpgYLI56jQRJYqngxYAAzfEUua+3NgSqAo= +github.com/jedib0t/go-pretty/v6 v6.6.5/go.mod h1:Uq/HrbhuFty5WSVNfjpQQe47x16RwVGXIveNGEyGtHs= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= +github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= 
-github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4= -github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= -github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= 
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= -github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4= -github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod 
h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.0-20181025052659-b20a3daf6a39/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.6/go.mod h1:Lj5gIVxjBlH8REa3icEOkdfchwYc291nShzZ4QYWyMo= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod 
h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-zglob v0.0.4 h1:LQi2iOm0/fGgu80AioIJ/1j9w9Oh+9DZ39J4VAGzHQM= +github.com/mattn/go-zglob v0.0.4/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/migueleliasweb/go-github-mock v1.1.0 
h1:GKaOBPsrPGkAKgtfuWY8MclS1xR6MInkx1SexJucMwE= +github.com/migueleliasweb/go-github-mock v1.1.0/go.mod h1:pYe/XlGs4BGMfRY4vmeixVsODHnVDDhJ9zoi0qzSMHc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770 h1:drhDO54gdT/a15GBcMRmunZiNcLgPiFIJa23KzmcvcU= +github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770/go.mod h1:SO/iHr6q2EzbqRApt+8/E9wqebTwQn5y+UlB04bxzo0= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= -github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mna/pigeon v0.0.0-20180808201053-bb0192cfc2ae/go.mod h1:Iym28+kJVnC1hfQvv5MUtI6AiFFzvQjHcvI4RFTG/04= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= 
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= 
-github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.15.0 
h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-policy-agent/frameworks/constraint v0.0.0-20200127222620-69dff9b895a2/go.mod h1:RYtG2t6a5nly9IWFMNgq/K29YzYMV9ftA7XlGOM75bs= -github.com/open-policy-agent/frameworks/constraint v0.0.0-20210422220901-804ff2ee8b4f h1:oEq3M/aUJbcx9LVuL+UQXhOl3xDwhbNB7WFQtWZdh4o= -github.com/open-policy-agent/frameworks/constraint v0.0.0-20210422220901-804ff2ee8b4f/go.mod h1:vvhkBONv7Uah2fvS/bQ/N1u0rSLvxZOs2ErR6m+4QtQ= -github.com/open-policy-agent/gatekeeper v0.0.0-20200130050101-a7990e5bc83a h1:VG+QBBJIURLRWFitdRxAMrxo0saatgWC9wZ7C7O2rPI= -github.com/open-policy-agent/gatekeeper v0.0.0-20200130050101-a7990e5bc83a/go.mod h1:KUnD20sANxG0aZ58e0juTnkdGsVUzWclPiIcprso5gk= -github.com/open-policy-agent/opa v0.16.2/go.mod h1:P0xUE/GQAAgnvV537GzA0Ikw4+icPELRT327QJPkaKY= -github.com/open-policy-agent/opa v0.24.0/go.mod h1:qEyD/i8j+RQettHGp4f86yjrjvv+ZYia+JHCMv2G7wA= -github.com/open-policy-agent/opa v0.29.3/go.mod 
h1:ZCOTD3yyFR8JvF8ETdWdiSPn9WcF1dXeQWOv7VoPorU= -github.com/open-policy-agent/opa v0.34.2 h1:asRmfDRUSd8gwPNRrpUsDxwOUkxLgc1x1FYkwjcnag4= -github.com/open-policy-agent/opa v0.34.2/go.mod h1:buysXn+6zB/b+6JgLkP4WgKZ9+UgUtFAgtemYGrL9Ik= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/peterbourgon/diskv 
v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.0.0-20181023235946-059132a15dd0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= +github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/open-policy-agent/frameworks/constraint v0.0.0-20230712214810-96753a21c26f h1:dJDnp6A6LBrU/hbve5NzZNV3OzPYXdD0SJUn+xAPj+I= +github.com/open-policy-agent/frameworks/constraint v0.0.0-20230712214810-96753a21c26f/go.mod h1:54/KzLMvA5ndBVpm7B1OjLeV0cUtTLTz2bZ2OtydLpU= +github.com/open-policy-agent/gatekeeper/v3 v3.13.0 h1:UUfIo/ZjLa0D6BBQlnSjlZetcAYbp54fZVVCLug4sY0= +github.com/open-policy-agent/gatekeeper/v3 v3.13.0/go.mod h1:umWn30oYZ4CGW0kOD7aeIfPwbhCQ9DibK2LTUrRW+bk= +github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U= +github.com/open-policy-agent/opa v0.70.0/go.mod h1:Y/nm5NY0BX0BqjBriKUiV81sCl8XOjjvqQG7dXrggtI= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= +github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= 
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210418123303-74da32850375/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.0.0-20181025174421-f30f42803563/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= -github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt 
v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sanposhiho/wastedassign v1.0.0/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/securego/gosec/v2 v2.7.0/go.mod h1:xNbGArrGUspJLuz3LS5XCY1EBW/0vABAl/LWfSklmiM= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.4/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sergi/go-diff 
v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smallfish/simpleyaml v0.0.0-20170911015856-a32031077861 h1:9z0Ip656Pc+3cj/BpHkErOVg4iE0xcAdvJwfA3hMVzU= -github.com/smallfish/simpleyaml v0.0.0-20170911015856-a32031077861/go.mod h1:eGZ1jp5PTJ+XVhTErUmw0xyPbgctPFlixWPypUrDkSs= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod 
h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.0-20181021141114-fe5e611709b0/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= -github.com/spf13/cobra v1.2.1/go.mod 
h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v0.0.0-20181024212040-082b515c9490/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= -github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod 
h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod 
h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tetafro/godot v1.4.6/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= +github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf 
v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= -github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.3.2/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod 
h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tmccombs/hcl2json v0.6.4 h1:/FWnzS9JCuyZ4MNwrG4vMrFrzRgsWEOVi+1AyYUVLGw= +github.com/tmccombs/hcl2json v0.6.4/go.mod h1:+ppKlIW3H5nsAsZddXPy2iMyvld3SHxyjswOZhavRDk= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference 
v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b h1:vVRagRXf67ESqAb72hG2C/ZwI8NtJF2u2V76EsuOHGY= -github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b/go.mod h1:HptNXiXVDcJjXe9SqMd0v2FsL9f8dz4GnXgltU6q/co= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod 
h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= -go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.15.1 h1:RgQYm4j2EvoBRXOPxhUvxPzRrGDo1eCOhHXuGfrj5S0= +github.com/zclconf/go-cty v1.15.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= +go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.8.3 h1:TDKlTkGDKm9kkJVUOAXDK5/fkqKHJVwYQSpoRfB43R4= +go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod 
h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 
+go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp 
v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181023182221-1baf3a9d7d67/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200927032502-5d4f70055728/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net 
v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE= -golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools 
v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools 
v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201017001424-6003fad69a88/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools 
v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2-0.20210512205948-8287d5da45e4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.58.0 h1:MDkAbYIB1JpSgCTOCYYoIec/coMlKK4oVbpnBLLcyT0= -google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= +gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA= +google.golang.org/api 
v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod 
h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12 h1:DN5b3HU13J4sMd/QjDx34U6afpaexKTDdop+26pdjdk= -google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod 
h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1552,128 +764,63 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= -gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools 
v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.4/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= -k8s.io/api v0.16.4/go.mod h1:AtzMnsR45tccQss5q8RnF+W8L81DH6XwXwo/joEx9u0= -k8s.io/api v0.17.2 h1:NF1UFXcKN7/OOv1uxdRz3qfra8AHsPav5M93hlV9+Dc= -k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= -k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= -k8s.io/apiextensions-apiserver v0.16.4/go.mod h1:HYQwjujEkXmQNhap2C9YDdIVOSskGZ3et0Mvjcyjbto= -k8s.io/apiextensions-apiserver v0.17.2 h1:cP579D2hSZNuO/rZj9XFRzwJNYb41DbNANJb6Kolpss= -k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= -k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= -k8s.io/apimachinery v0.16.4/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ= -k8s.io/apimachinery v0.17.2 h1:hwDQQFbdRlpnnsR64Asdi55GyCaIP/3WQpMmbNBeWr4= -k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= -k8s.io/apiserver v0.16.4/go.mod h1:kbLJOak655g6W7C+muqu1F76u9wnEycfKMqbVaXIdAc= -k8s.io/apiserver v0.17.2 h1:NssVvPALll6SSeNgo1Wk1h2myU1UHNwmhxV0Oxbcl8Y= -k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= -k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI= -k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= -k8s.io/client-go v0.16.4/go.mod h1:ZgxhFDxSnoKY0J0U2/Y1C8obKDdlhGPZwA7oHH863Ok= -k8s.io/client-go v0.17.2 
h1:ndIfkfXEGrNhLIgkr0+qhRguSD3u6DCmonepn1O6NYc= -k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= -k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= -k8s.io/code-generator v0.16.4/go.mod h1:mJUgkl06XV4kstAnLHAIzJPVCOzVR+ZcfPIv4fUsFCY= -k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= -k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= -k8s.io/component-base v0.16.4/go.mod h1:GYQ+4hlkEwdlpAp59Ztc4gYuFhdoZqiAJD1unYDJ3FM= -k8s.io/component-base v0.17.2 h1:0XHf+cerTvL9I5Xwn9v+0jmqzGAZI7zNydv4tL6Cw6A= -k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kubectl v0.17.2 h1:QZR8Q6lWiVRjwKslekdbN5WPMp53dS/17j5e+oi5XVU= -k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk= -k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f 
h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9NPsg= -sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= -sigs.k8s.io/controller-tools v0.2.2/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= -sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= -sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= -sigs.k8s.io/structured-merge-diff v1.0.1/go.mod 
h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= -sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM= -sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= -vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= +k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= +k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= +k8s.io/apiextensions-apiserver v0.27.2 h1:iwhyoeS4xj9Y7v8YExhUwbVuBhMr3Q4bd/laClBV6Bo= +k8s.io/apiextensions-apiserver v0.27.2/go.mod h1:Oz9UdvGguL3ULgRdY9QMUzL2RZImotgxvGjdWRq6ZXQ= +k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= +k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= +k8s.io/apiserver v0.27.2 h1:p+tjwrcQEZDrEorCZV2/qE8osGTINPuS5ZNqWAvKm5E= +k8s.io/apiserver v0.27.2/go.mod h1:EsOf39d75rMivgvvwjJ3OW/u9n1/BmUMK5otEOJrb1Y= +k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= +k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= +k8s.io/component-base v0.27.2 h1:neju+7s/r5O4x4/txeUONNTS9r1HsPbyoPBAtHsDCpo= +k8s.io/component-base v0.27.2/go.mod h1:5UPk7EjfgrfgRIuDBFtsEFAe4DAvP3U+M8RTzoSJkpo= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.27.2 
h1:sSBM2j94MHBFRWfHIWtEXWCicViQzZsb177rNsKBhZg= +k8s.io/kubectl v0.27.2/go.mod h1:GCOODtxPcrjh+EC611MqREkU8RjYBh10ldQCQ6zpFKw= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/controller-runtime v0.15.1 h1:9UvgKD4ZJGcj24vefUFgZFP3xej/3igL9BsOUTb/+4c= +sigs.k8s.io/controller-runtime v0.15.1/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= +sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/cli/launchpad/doc.go b/cli/launchpad/doc.go index 90c382fa7ef..e7c178b5d80 100644 --- a/cli/launchpad/doc.go +++ b/cli/launchpad/doc.go @@ -1,7 +1,7 @@ // Package launchpad contains libraries for reading Cloud Foundation Toolkit custom // resource definitions and output Infrastructure as Code ready scripts. 
// -// Supported resources can be found in generic.go +// # Supported resources can be found in generic.go // // All resources implements resourceHandler interface and are expected // to have a full YAML representation with name {resource}YAML and additional @@ -12,12 +12,12 @@ // Implementer of a resource type should aim to track sub resources as an addition // field, as oppose to manipulating the parsed YAML directly. For example, // -// kind: Folder -// spec: -// id: X -// folders: -// - id: Y -// - id: Z +// kind: Folder +// spec: +// id: X +// folders: +// - id: Y +// - id: Z // // Folder X have Y, Z folders as sub resources. In the evaluation hierarchy, folder X // is a folderYAML representation with folderSpecYAML spec. Subdirectories Y, Z are @@ -38,26 +38,26 @@ // // An implicit reference occurs if a resource is nested under another resource. // -// kind: Folder -// spec: -// id: X -// folders: -// - id: Y +// kind: Folder +// spec: +// id: X +// folders: +// - id: Y // // Folder Y have an implicit reference of Folder X as a parent. // // An explicit reference occurs if a resource specified referenced type and id. // -// kind: Folder -// spec: -// id: Y -// parentRef: -// type: Folder -// id: X +// kind: Folder +// spec: +// id: Y +// parentRef: +// type: Folder +// id: X // // Folder Y have an explicit reference of Folder X as a parent. // // As references can have multiple use cases, all YAML definition will use `Ref` // as suffix for referenced fields. For example, `parentRef` to specify // parent-child relationship such as organization/folder, folder/folder. 
-package launchpad \ No newline at end of file +package launchpad diff --git a/cli/launchpad/folder_test.go b/cli/launchpad/folder_test.go index f3c59c8f423..a203ebc7307 100644 --- a/cli/launchpad/folder_test.go +++ b/cli/launchpad/folder_test.go @@ -1,8 +1,9 @@ package launchpad import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/testify/assert" ) // newTestFolder generates a fully formed folder for testing usage. @@ -36,7 +37,7 @@ func TestFolders_add(t *testing.T) { t.Run(tc.name, func(t *testing.T) { fs := folders{} for _, newF := range tc.input { - _ = fs.add(newF) // silently ignore existing resource + _ = fs.add(newF) // silently ignore existing resource } assert.Equal(t, tc.output, fs, "expected folders to be the same") }) diff --git a/cli/launchpad/generic.go b/cli/launchpad/generic.go index 50aa064d793..136c7e10760 100644 --- a/cli/launchpad/generic.go +++ b/cli/launchpad/generic.go @@ -30,7 +30,7 @@ var ( errMissingRequiredField = errors.New("missing required field") errInvalidParent = errors.New("invalid parent reference") errInvalidInput = errors.New("invalid input") - tfNameRegex = regexp.MustCompile("^[a-zA-Z][a-zA-Z\\d\\-\\_]*$") + tfNameRegex = regexp.MustCompile(`^[a-zA-Z][a-zA-Z\d\-\_]*$`) ) // resourceHandler represents a resource that can be processed by launchpad. diff --git a/cli/launchpad/root.go b/cli/launchpad/root.go index 53b98993eda..23cf6b1d4f6 100644 --- a/cli/launchpad/root.go +++ b/cli/launchpad/root.go @@ -2,21 +2,22 @@ package launchpad import ( "fmt" - "github.com/pkg/errors" - "gopkg.in/yaml.v2" - "io/ioutil" "log" "os" "path/filepath" "strings" + + "github.com/pkg/errors" + "gopkg.in/yaml.v3" ) // NewGenerate takes file patterns as input YAMLs and output Infrastructure as // Code ready scripts based on specified output flavor. 
// // NewGenerate can be triggered by -// $ cft launchpad generate *.yaml -// $ cft lp g *.yaml +// +// $ cft launchpad generate *.yaml +// $ cft lp g *.yaml func NewGenerate(rawPaths []string, outFlavor OutputFlavor, outputDir string) { // attempt to load all configs with best effort log.Println("debug: output location", outputDir) // Remove after generate code is written @@ -104,7 +105,7 @@ func loadResources(rawPaths []string) []resourceHandler { // can in theory place their own file in matching relative path and overwrite the binary // default. func loadFile(fp string) (string, error) { - if content, err := ioutil.ReadFile(fp); err == nil { + if content, err := os.ReadFile(fp); err == nil { return string(content), nil } else { if !os.IsNotExist(err) { diff --git a/cli/launchpad/runtime_test.go b/cli/launchpad/runtime_test.go index 465e3178b33..cabfc63ee96 100644 --- a/cli/launchpad/runtime_test.go +++ b/cli/launchpad/runtime_test.go @@ -2,8 +2,9 @@ package launchpad import ( "testing" + + "github.com/stretchr/testify/assert" ) -import "github.com/stretchr/testify/assert" type dummyResource struct { headerYAML diff --git a/cli/launchpad/statics.go b/cli/launchpad/statics.go index 2b3f2521f5d..18cc88decb9 100644 --- a/cli/launchpad/statics.go +++ b/cli/launchpad/statics.go @@ -1,7 +1,8 @@ package launchpad + // WARNING: Generated file, do not modify directly! 
-var statics = map[string]string { +var statics = map[string]string{ "static/tmpl/tf/license.tf.tmpl": `/** * Copyright 2019 Google LLC * diff --git a/cli/report/cmd.go b/cli/report/cmd.go index 5fa6db5a411..57212cc9e9d 100644 --- a/cli/report/cmd.go +++ b/cli/report/cmd.go @@ -31,23 +31,37 @@ func init() { viper.AutomaticEnv() Cmd.Flags().StringVar(&flags.queryPath, "query-path", "", "Path to directory containing inventory queries") - Cmd.MarkFlagRequired("query-path") + err := Cmd.MarkFlagRequired("query-path") + if err != nil { + panic(err) + } Cmd.Flags().StringVar(&flags.outputPath, "output-path", "", "Path to directory to contain report outputs") - Cmd.MarkFlagRequired("output-path") + err = Cmd.MarkFlagRequired("output-path") + if err != nil { + panic(err) + } //Cmd.Flags().StringVar(&flags.bucketName, "bucket", "", "GCS bucket name for storing inventory (conflicts with --dir-path)") Cmd.Flags().StringVar(&flags.dirName, "dir-path", "", "Local directory path for storing inventory ") - Cmd.MarkFlagRequired("dir-path") + err = Cmd.MarkFlagRequired("dir-path") + if err != nil { + panic(err) + } Cmd.Flags().StringVar(&flags.reportFormat, "report-format", "", "Format of inventory report outputs, can be json or csv, default is csv") viper.SetDefault("report-format", "csv") - viper.BindPFlag("report-format", Cmd.Flags().Lookup("report-format")) + err = viper.BindPFlag("report-format", Cmd.Flags().Lookup("report-format")) + if err != nil { + panic(err) + } Cmd.AddCommand(listCmd) listCmd.Flags().StringVar(&flags.queryPath, "query-path", "", "Path to directory containing inventory queries") - listCmd.MarkFlagRequired("query-path") - + err = listCmd.MarkFlagRequired("query-path") + if err != nil { + panic(err) + } } // Cmd represents the base report command @@ -55,7 +69,7 @@ var Cmd = &cobra.Command{ Use: "report", Short: "Generate inventory reports based on CAI outputs in a directory.", Long: `Generate inventory reports for resources in Cloud Asset Inventory (CAI) 
output files, with reports defined in rego (in '/reports/sample' folder). - + Example: cft report --query-path /reports/sample \ --dir-path \ @@ -85,7 +99,7 @@ var listCmd = &cobra.Command{ Use: "list-available-reports", Short: "List available inventory report queries.", Long: `List available inventory report queries for resources in Cloud Asset Inventory (CAI). - + Example: cft report list-available-reports --query-path /reports/sample `, diff --git a/cli/report/convert.go b/cli/report/convert.go index 2d247c26863..50b8b1e9eb4 100644 --- a/cli/report/convert.go +++ b/cli/report/convert.go @@ -27,7 +27,7 @@ import ( // and concats all objects into one single array func ReadFilesAndConcat(dir string) (results []interface{}, err error) { files, err := listFiles(dir) - const maxCapacity = 1024*1024 + const maxCapacity = 1024 * 1024 if err != nil { return nil, err } @@ -41,7 +41,7 @@ func ReadFilesAndConcat(dir string) (results []interface{}, err error) { s := bufio.NewScanner(f) buf := make([]byte, maxCapacity) s.Buffer(buf, maxCapacity) - + for s.Scan() { var row map[string]interface{} err = json.Unmarshal(s.Bytes(), &row) diff --git a/cli/report/report.go b/cli/report/report.go index ce607cd11a4..b4eb49aa663 100644 --- a/cli/report/report.go +++ b/cli/report/report.go @@ -19,7 +19,6 @@ import ( "encoding/csv" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "sort" @@ -43,7 +42,11 @@ func GenerateReports(dirPath string, queryPath string, outputPath string, report if err != nil { return err } - printReports(results, outputPath, reportFormat, fileSuffix) + err = printReports(results, outputPath, reportFormat, fileSuffix) + if err != nil { + return err + } + return nil } @@ -57,7 +60,7 @@ func convertAndGenerateTempAssetFile(caiPath string, outputPath string, fileMidN } outJSON, _ := json.MarshalIndent(wrapped, "", " ") rawAssetFileName = "raw_assets_" + fileMidName + ".json" - err = ioutil.WriteFile(filepath.Join(outputPath, rawAssetFileName), outJSON, 0644) + 
err = os.WriteFile(filepath.Join(outputPath, rawAssetFileName), outJSON, 0644) if err != nil { return "", err } @@ -109,7 +112,7 @@ func printReports(results interface{}, reportOutputPath string, format string, f if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(reportOutputPath, reportFileName), fileContent, 0644) + err = os.WriteFile(filepath.Join(reportOutputPath, reportFileName), fileContent, 0644) if err != nil { return err } @@ -128,7 +131,11 @@ func printReports(results interface{}, reportOutputPath string, format string, f keys = append(keys, key) } sort.Strings(keys) - w.Write(keys) + err := w.Write(keys) + if err != nil { + return err + } + w.Flush() for _, record := range contentSlice { recordMap := record.(map[string]interface{}) @@ -136,7 +143,10 @@ func printReports(results interface{}, reportOutputPath string, format string, f for _, key := range keys { record = append(record, recordMap[key].(string)) } - w.Write(record) + err = w.Write(record) + if err != nil { + return err + } } w.Flush() } diff --git a/cli/scorecard/cmd.go b/cli/scorecard/cmd.go index 78ce6c6ca52..0e00d1ed1b0 100644 --- a/cli/scorecard/cmd.go +++ b/cli/scorecard/cmd.go @@ -27,13 +27,19 @@ func init() { viper.AutomaticEnv() Cmd.Flags().StringVar(&flags.policyPath, "policy-path", "", "Path to directory containing validation policies") - Cmd.MarkFlagRequired("policy-path") + err := Cmd.MarkFlagRequired("policy-path") + if err != nil { + panic(err) + } Cmd.Flags().StringVar(&flags.outputPath, "output-path", "", "Path to directory to contain scorecard outputs. 
Output to console if not specified") Cmd.Flags().StringVar(&flags.outputFormat, "output-format", "txt", "Format of scorecard outputs, can be txt, json or csv") viper.SetDefault("output-format", "txt") - viper.BindPFlag("output-format", Cmd.Flags().Lookup("output-format")) + err = viper.BindPFlag("output-format", Cmd.Flags().Lookup("output-format")) + if err != nil { + panic(err) + } Cmd.Flags().StringSliceVar(&flags.metadataFields, "output-metadata-fields", []string{}, "List of comma delimited violation metadata fields to include in output. By default no metadata fields in output when --output-format is txt or csv. All metadata will be in output when --output-format is json.") @@ -45,7 +51,6 @@ func init() { Cmd.Flags().StringVar(&flags.targetProjectID, "target-project", "", "Project ID to analyze (Works with --bucket and --refresh; conflicts with --target-folder or --target--organization)") Cmd.Flags().StringVar(&flags.targetFolderID, "target-folder", "", "Folder ID to analyze (Works with --bucket and --refresh; conflicts with --target-project or --target--organization)") Cmd.Flags().StringVar(&flags.targetOrgID, "target-organization", "", "Organization ID to analyze (Works with --bucket and --refresh; conflicts with --target-project or --target--folder)") - } // Cmd represents the base scorecard command diff --git a/cli/scorecard/inventory.go b/cli/scorecard/inventory.go index c3736675afb..d20deec5335 100644 --- a/cli/scorecard/inventory.go +++ b/cli/scorecard/inventory.go @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" asset "cloud.google.com/go/asset/apiv1" - assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1" + assetpb "cloud.google.com/go/asset/apiv1/assetpb" ) // InventoryConfig manages a CAI inventory diff --git a/cli/scorecard/main.go b/cli/scorecard/main.go index 49f16e6313e..38541484e49 100644 --- a/cli/scorecard/main.go +++ b/cli/scorecard/main.go @@ -18,5 +18,5 @@ import ( log "github.com/inconshreveable/log15" ) -// Log (log15) handler 
for Scorecard +// Log (log15) handler for Scorecard var Log = log.New() diff --git a/cli/scorecard/proto.go b/cli/scorecard/proto.go index 9a4cf8fd3e8..287443f9980 100644 --- a/cli/scorecard/proto.go +++ b/cli/scorecard/proto.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC +// Copyright 2019-2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,18 +17,17 @@ package scorecard import ( "encoding/json" "strconv" - "strings" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" "github.com/pkg/errors" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) func unMarshallAsset(from []byte, to proto.Message) error { // CAI export returns org_policy [1] with update_time if Timestamp format in Seconds and Nanos - // but in jsonpb, Timestamp is expected to be a string in the RFC 3339 format [2]. + // but in protojson, Timestamp is expected to be a string in the RFC 3339 format [2]. // i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" - // Hence doing a workaround to remove the field so that jsonpb.Unmarshaler can handle org policy. + // Hence doing a workaround to remove the field so that protojson.Unmarshaler can handle org policy. 
// [1] https://github.com/googleapis/googleapis/blob/master/google/cloud/orgpolicy/v1/orgpolicy.proto // [2] https://godoc.org/google.golang.org/protobuf/types/known/timestamppb#Timestamp @@ -67,8 +66,8 @@ func protoViaJSON(from interface{}, to proto.Message) error { if err != nil { return errors.Wrap(err, "marshaling to json") } - umar := &jsonpb.Unmarshaler{AllowUnknownFields: true} - if err := umar.Unmarshal(strings.NewReader(string(jsn)), to); err != nil { + umar := &protojson.UnmarshalOptions{DiscardUnknown: true} + if err := umar.Unmarshal(jsn, to); err != nil { return errors.Wrap(err, "unmarshaling to proto") } @@ -78,14 +77,13 @@ func protoViaJSON(from interface{}, to proto.Message) error { // interfaceViaJSON uses JSON as an intermediary serialization to convert a protobuf message // into an interface value func interfaceViaJSON(from proto.Message) (interface{}, error) { - marshaler := &jsonpb.Marshaler{} - jsn, err := marshaler.MarshalToString(from) + jsn, err := protojson.Marshal(from) if err != nil { return nil, errors.Wrap(err, "marshaling to json") } var to interface{} - if err := json.Unmarshal([]byte(jsn), &to); err != nil { + if err := json.Unmarshal(jsn, &to); err != nil { return nil, errors.Wrap(err, "unmarshaling to interface") } @@ -95,15 +93,14 @@ func interfaceViaJSON(from proto.Message) (interface{}, error) { // stringViaJSON uses JSON as an intermediary serialization to convert a protobuf message // into an string value func stringViaJSON(from proto.Message) (string, error) { - marshaler := &jsonpb.Marshaler{} - jsn, err := marshaler.MarshalToString(from) + jsn, err := protojson.Marshal(from) if err != nil { return "", errors.Wrap(err, "marshaling to json") } - str, err := strconv.Unquote(jsn) + str, err := strconv.Unquote(string(jsn)) if err != nil { // return original json string if it's not a quoted string - return jsn, nil + return string(jsn), nil } return str, nil } diff --git a/cli/scorecard/proto_test.go 
b/cli/scorecard/proto_test.go index 235ccc1962f..b59731b6ff5 100644 --- a/cli/scorecard/proto_test.go +++ b/cli/scorecard/proto_test.go @@ -16,25 +16,32 @@ package scorecard import ( "encoding/json" - "io/ioutil" + "os" "testing" "github.com/GoogleCloudPlatform/config-validator/pkg/api/validator" "github.com/google/go-cmp/cmp" ) -func jsonToInterface(jsonStr string) map[string]interface{} { +func jsonToInterface(jsonStr string) (map[string]interface{}, error) { var interfaceVar map[string]interface{} - json.Unmarshal([]byte(jsonStr), &interfaceVar) - return interfaceVar + err := json.Unmarshal([]byte(jsonStr), &interfaceVar) + if err != nil { + return nil, err + } + + return interfaceVar, nil } func TestDataTypeTransformation(t *testing.T) { - fileContent, err := ioutil.ReadFile(testRoot + "/shared/iam_policy_audit_logs.json") + fileContent, err := os.ReadFile(testRoot + "/shared/iam_policy_audit_logs.json") + if err != nil { + t.Fatal("unexpected error", err) + } + asset, err := jsonToInterface(string(fileContent)) if err != nil { t.Fatal("unexpected error", err) } - asset := jsonToInterface(string(fileContent)) wantedName := "//cloudresourcemanager.googleapis.com/projects/23456" pbAsset := &validator.Asset{} @@ -72,7 +79,7 @@ func TestDataTypeTransformation(t *testing.T) { t.Fatalf("failed to parse JSON string %v: %v", gotStr, err) } - wantStr := `{"name":"//cloudresourcemanager.googleapis.com/projects/23456","assetType":"cloudresourcemanager.googleapis.com/Project","iamPolicy":{"version":1,"bindings":[{"role":"roles/owner","members":["user:user@example.com"]}]},"ancestors":["projects/1234","organizations/56789"]}` + wantStr := 
`{"name":"//cloudresourcemanager.googleapis.com/projects/23456","assetType":"cloudresourcemanager.googleapis.com/Project","iamPolicy":{"version":1,"bindings":[{"role":"roles/owner","members":["user:user@example.com"]}],"auditConfigs":[{"service":"storage.googleapis.com","auditLogConfigs":[{"logType":"ADMIN_READ"},{"logType":"DATA_READ"},{"logType":"DATA_WRITE"}]}]},"ancestors":["projects/1234","organizations/56789"]}` var wantJSON map[string]interface{} if err := json.Unmarshal([]byte(wantStr), &wantJSON); err != nil { t.Fatalf("failed to parse JSON string %v: %v", wantStr, err) diff --git a/cli/scorecard/score.go b/cli/scorecard/score.go index bf187d528e9..afcf1131193 100644 --- a/cli/scorecard/score.go +++ b/cli/scorecard/score.go @@ -28,8 +28,8 @@ import ( "github.com/GoogleCloudPlatform/config-validator/pkg/api/validator" "github.com/GoogleCloudPlatform/config-validator/pkg/gcv" - _struct "github.com/golang/protobuf/ptypes/struct" "github.com/pkg/errors" + "google.golang.org/protobuf/types/known/structpb" ) // ScoringConfig holds settings for generating a score @@ -100,12 +100,12 @@ func getConstraintShortName(constraintName string) string { // RichViolation holds a violation with its category type RichViolation struct { - validator.Violation `json:"-"` - Category string // category of violation - Resource string - Message string - Metadata *_struct.Value `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` - asset *validator.Asset `json:"-"` + *validator.Violation `json:"-"` + Category string // category of violation + Resource string + Message string + Metadata *structpb.Value `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` + asset *validator.Asset `json:"-"` } var availableCategories = map[string]string{ @@ -116,7 +116,7 @@ var availableCategories = map[string]string{ } func (config *ScoringConfig) getConstraintForViolation(violation *RichViolation) (*constraintViolations, error) { - key := violation.GetConstraint() 
+ key := violation.Violation.GetConstraint() cv, found := config.constraints[key] if !found { constraint := key @@ -125,7 +125,7 @@ func (config *ScoringConfig) getConstraintForViolation(violation *RichViolation) } config.constraints[key] = cv - metadata := violation.GetMetadata().GetStructValue().GetFields()["constraint"] + metadata := violation.Violation.GetMetadata().GetStructValue().GetFields()["constraint"] annotations := metadata.GetStructValue().GetFields()["annotations"].GetStructValue().GetFields() categoryKey := otherCategoryKey categoryValue, found := annotations["bundles.validator.forsetisecurity.org/scorecard-v1"] @@ -192,7 +192,7 @@ func writeResults(config *ScoringConfig, dest io.Writer, outputFormat string, ou } } richViolations = append(richViolations, v) - Log.Debug("Violation metadata", "metadata", v.GetMetadata()) + Log.Debug("Violation metadata", "metadata", v.Violation.GetMetadata()) } } } @@ -200,15 +200,21 @@ func writeResults(config *ScoringConfig, dest io.Writer, outputFormat string, ou if err != nil { return err } - io.WriteString(dest, string(byteContent)+"\n") + _, err = io.WriteString(dest, string(byteContent)+"\n") + if err != nil { + return err + } + return nil case "csv": w := csv.NewWriter(dest) header := []string{"Category", "Constraint", "Resource", "Message", "Parent"} - for _, field := range outputMetadataFields { - header = append(header, field) + header = append(header, outputMetadataFields...) 
+ err := w.Write(header) + if err != nil { + return err } - w.Write(header) + w.Flush() for _, category := range config.categories { for _, cv := range category.constraints { @@ -217,37 +223,63 @@ func writeResults(config *ScoringConfig, dest io.Writer, outputFormat string, ou if len(v.asset.Ancestors) > 0 { parent = v.asset.Ancestors[0] } - record := []string{category.Name, getConstraintShortName(v.Constraint), v.Resource, v.Message, parent} + record := []string{category.Name, getConstraintShortName(v.Violation.Constraint), v.Resource, v.Message, parent} for _, field := range outputMetadataFields { metadata := v.Metadata.GetStructValue().Fields["details"].GetStructValue().Fields[field] value, _ := stringViaJSON(metadata) record = append(record, value) } - w.Write(record) + err := w.Write(record) + if err != nil { + return err + } + w.Flush() - Log.Debug("Violation metadata", "metadata", v.GetMetadata()) + Log.Debug("Violation metadata", "metadata", v.Violation.GetMetadata()) } } } return nil case "txt": - io.WriteString(dest, fmt.Sprintf("\n\n%v total issues found\n", config.CountViolations())) + _, err := io.WriteString(dest, fmt.Sprintf("\n\n%v total issues found\n", config.CountViolations())) + if err != nil { + return err + } + for _, category := range config.categories { - io.WriteString(dest, fmt.Sprintf("\n\n%v: %v issues found\n", category.Name, category.Count())) - io.WriteString(dest, fmt.Sprintf("----------\n")) + _, err = io.WriteString(dest, fmt.Sprintf("\n\n%v: %v issues found\n", category.Name, category.Count())) + if err != nil { + return err + } + _, err = io.WriteString(dest, "----------\n") + if err != nil { + return err + } for _, cv := range category.constraints { - io.WriteString(dest, fmt.Sprintf("%v: %v issues\n", getConstraintShortName(cv.constraint), cv.Count())) + _, err = io.WriteString(dest, fmt.Sprintf("%v: %v issues\n", getConstraintShortName(cv.constraint), cv.Count())) + if err != nil { + return err + } for _, v := range 
cv.Violations { - io.WriteString(dest, fmt.Sprintf("- %v\n", v.Message)) + _, err = io.WriteString(dest, fmt.Sprintf("- %v\n", v.Message)) + if err != nil { + return err + } for _, field := range outputMetadataFields { metadata := v.Metadata.GetStructValue().Fields["details"].GetStructValue().Fields[field] value, _ := stringViaJSON(metadata) if value != "" { - io.WriteString(dest, fmt.Sprintf(" %v: %v\n", field, value)) + _, err = io.WriteString(dest, fmt.Sprintf(" %v: %v\n", field, value)) + if err != nil { + return err + } } } - io.WriteString(dest, "\n") - Log.Debug("Violation metadata", "metadata", v.GetMetadata()) + _, err = io.WriteString(dest, "\n") + if err != nil { + return err + } + Log.Debug("Violation metadata", "metadata", v.Violation.GetMetadata()) } } } @@ -289,7 +321,10 @@ func (inventory *InventoryConfig) Score(config *ScoringConfig, outputPath string } } // Code to measure - writeResults(config, dest, outputFormat, outputMetadataFields) + err := writeResults(config, dest, outputFormat, outputMetadataFields) + if err != nil { + return err + } } else { fmt.Println("No issues found found! 
You have a perfect score.") } diff --git a/cli/scorecard/score_test.go b/cli/scorecard/score_test.go index d25fc6a5169..0e0458c057b 100644 --- a/cli/scorecard/score_test.go +++ b/cli/scorecard/score_test.go @@ -19,7 +19,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "os" "testing" "github.com/stretchr/testify/assert" @@ -78,13 +78,17 @@ func TestWriteViolations(t *testing.T) { for _, tc := range tests { output := new(bytes.Buffer) - fileContent, err := ioutil.ReadFile(testRoot + "/output/" + tc.filename) + fileContent, err := os.ReadFile(testRoot + "/output/" + tc.filename) if err != nil { t.Fatal("unexpected error", err) } expected := tc.listMaker(fileContent) - writeResults(config, output, tc.format, nil) + err = writeResults(config, output, tc.format, nil) + if err != nil { + t.Fatal("unexpected error", err) + } + actual := tc.listMaker(output.Bytes()) assert.ElementsMatch(t, expected, actual, tc.message) diff --git a/cli/scorecard/violations.go b/cli/scorecard/violations.go index a43062be77f..58685728361 100644 --- a/cli/scorecard/violations.go +++ b/cli/scorecard/violations.go @@ -137,7 +137,7 @@ func getViolations(inventory *InventoryConfig, config *ScoringConfig) ([]*RichVi wp.Stop() } for _, violation := range violations { - richViolation := RichViolation{*violation, "", violation.Resource, violation.Message, violation.Metadata, asset} + richViolation := RichViolation{violation, "", violation.Resource, violation.Message, violation.Metadata, asset} mu.Lock() richViolations = append(richViolations, &richViolation) mu.Unlock() diff --git a/cli/scorecard/violations_test.go b/cli/scorecard/violations_test.go index 21814c9a8a1..89f4f06e435 100644 --- a/cli/scorecard/violations_test.go +++ b/cli/scorecard/violations_test.go @@ -16,7 +16,7 @@ package scorecard import ( "context" - "io/ioutil" + "os" "testing" ) @@ -58,7 +58,7 @@ func TestGetAssetFromJSON(t *testing.T) { } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - 
fileContent, err := ioutil.ReadFile(testRoot + tc.assetJSONFile) + fileContent, err := os.ReadFile(testRoot + tc.assetJSONFile) if err != nil { t.Fatal("unexpected error", err) } @@ -78,7 +78,6 @@ func TestGetAssetFromJSON(t *testing.T) { if tc.isIamPolicy && pbAsset.IamPolicy == nil { t.Errorf("wanted IAM Policy bindings, got %s", pbAsset) } - }) } } @@ -113,8 +112,8 @@ func TestGetViolations(t *testing.T) { } violationMap := make(map[string]int) for _, v := range violations { - violationMap[v.Constraint+"-"+v.Resource] = 1 - Log.Debug("Found violation", "constraint", v.Constraint, "resource", v.Resource) + violationMap[v.Violation.Constraint+"-"+v.Resource] = 1 + Log.Debug("Found violation", "constraint", v.Violation.Constraint, "resource", v.Resource) } for _, tc := range testCases { diff --git a/cli/testdata/bpmetadata/assets/icon.png b/cli/testdata/bpmetadata/assets/icon.png new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/acm/acm-terraform-blog-part1/README.md b/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/acm/acm-terraform-blog-part1/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/acm/acm-terraform-blog-part1/terraform/main.tf b/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/acm/acm-terraform-blog-part1/terraform/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/acm/acm-terraform-blog-part2/README.md b/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/acm/acm-terraform-blog-part2/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/beta_cluster/README.md b/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/beta_cluster/README.md new file mode 
100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/simple_regional/README.md b/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/simple_regional/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/simple_regional/main.tf b/cli/testdata/bpmetadata/content/examples-some-without-tf/examples/simple_regional/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part1/README.md b/cli/testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part1/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part1/terraform/main.tf b/cli/testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part1/terraform/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part1/terraform/metadata.yaml b/cli/testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part1/terraform/metadata.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part2/README.md b/cli/testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part2/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part2/main.tf b/cli/testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part2/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/acm/metadata.display.yaml b/cli/testdata/bpmetadata/content/examples/acm/metadata.display.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/acm/metadata.yaml 
b/cli/testdata/bpmetadata/content/examples/acm/metadata.yaml new file mode 100644 index 00000000000..27ac3f5ca8a --- /dev/null +++ b/cli/testdata/bpmetadata/content/examples/acm/metadata.yaml @@ -0,0 +1,7 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-acm +spec: + info: + title: Terraform Google Module ACM diff --git a/cli/testdata/bpmetadata/content/examples/acm/modules/submodule-01/README.md b/cli/testdata/bpmetadata/content/examples/acm/modules/submodule-01/README.md new file mode 100644 index 00000000000..b904c60be62 --- /dev/null +++ b/cli/testdata/bpmetadata/content/examples/acm/modules/submodule-01/README.md @@ -0,0 +1 @@ +# ACM - Submodule 01 diff --git a/cli/testdata/bpmetadata/content/examples/simple_regional/README.md b/cli/testdata/bpmetadata/content/examples/simple_regional/README.md new file mode 100644 index 00000000000..35e173a0e14 --- /dev/null +++ b/cli/testdata/bpmetadata/content/examples/simple_regional/README.md @@ -0,0 +1,3 @@ +# Simple Regional + +This is a simple blueprint diff --git a/cli/testdata/bpmetadata/content/examples/simple_regional/main.tf b/cli/testdata/bpmetadata/content/examples/simple_regional/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/simple_regional/metadata.yaml b/cli/testdata/bpmetadata/content/examples/simple_regional/metadata.yaml new file mode 100644 index 00000000000..b82a85e7d27 --- /dev/null +++ b/cli/testdata/bpmetadata/content/examples/simple_regional/metadata.yaml @@ -0,0 +1,10 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: simple-regional +spec: + info: + title: Simple Regional + source: + repo: https://github.com/GoogleCloudPlatform/simple-regional + sourceType: git diff --git a/cli/testdata/bpmetadata/content/examples/simple_regional/modules/submodule-01/main.tf 
b/cli/testdata/bpmetadata/content/examples/simple_regional/modules/submodule-01/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/simple_regional_beta/README.md b/cli/testdata/bpmetadata/content/examples/simple_regional_beta/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/simple_regional_beta/main.tf b/cli/testdata/bpmetadata/content/examples/simple_regional_beta/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/examples/simple_regional_beta/modules/submodule-01/README.md b/cli/testdata/bpmetadata/content/examples/simple_regional_beta/modules/submodule-01/README.md new file mode 100644 index 00000000000..e7ff51f3a09 --- /dev/null +++ b/cli/testdata/bpmetadata/content/examples/simple_regional_beta/modules/submodule-01/README.md @@ -0,0 +1 @@ +# Simple Regional Beta - Submodule 01 diff --git a/cli/testdata/bpmetadata/content/examples/simple_regional_beta/variables.tf b/cli/testdata/bpmetadata/content/examples/simple_regional_beta/variables.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/modules-no-tf/modules/beta-public-cluster/README.md b/cli/testdata/bpmetadata/content/modules-no-tf/modules/beta-public-cluster/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/modules/beta-public-cluster/README.md b/cli/testdata/bpmetadata/content/modules/beta-public-cluster/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/modules/beta-public-cluster/main.tf b/cli/testdata/bpmetadata/content/modules/beta-public-cluster/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/modules/binary-authorization/README.md b/cli/testdata/bpmetadata/content/modules/binary-authorization/README.md new 
file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/modules/binary-authorization/main.tf b/cli/testdata/bpmetadata/content/modules/binary-authorization/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/modules/private-cluster/README.md b/cli/testdata/bpmetadata/content/modules/private-cluster/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/content/modules/private-cluster/main.tf b/cli/testdata/bpmetadata/content/modules/private-cluster/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/md/list-content.md b/cli/testdata/bpmetadata/md/list-content.md new file mode 100644 index 00000000000..ab365389499 --- /dev/null +++ b/cli/testdata/bpmetadata/md/list-content.md @@ -0,0 +1,46 @@ +# h1 doc title +some content doc title for h1 +some content doc title for h1 + +## h2 sub heading +some content sub heading for h2 + +## Documentation +- [document-01](http://google.com/doc-01) +- [document-02](http://google.com/doc-02) +- [document-03](http://google.com/doc-03) +- [document-04](http://google.com/doc-04) + +### h3 sub sub heading + +## Horizontal Rules + +## Diagrams +- text-document-01 +- text-document-02 + +## Description: +### tagline +Opinionated GCP project creation + +### detailed +This blueprint allows you to create opinionated Google Cloud Platform projects. +It creates projects and configures aspects like Shared VPC connectivity, IAM access, Service Accounts, and API enablement to follow best practices. + +### preDeploy +To deploy this blueprint you must have an active billing account and billing permissions. + +### Architecture +1. User requests are sent to the front end, which is deployed on two Cloud Run services as containers to support high scalability applications. +2. The request then lands on the middle tier, which is the API layer that provides access to the backend. 
This is also deployed on Cloud Run for scalability and ease of deployment in multiple languages. This middleware is a Golang based API. + +### Architecture +![Test Image](https://i.redd.it/w3kr4m2fi3111.png) +1. Step 1 +2. Step 2 +3. Step 3 + +### ArchitectureNotValid +1. Step 1 +2. Step 2 +3. Step 3 diff --git a/cli/testdata/bpmetadata/md/simple-content.md b/cli/testdata/bpmetadata/md/simple-content.md new file mode 100644 index 00000000000..afd91de989c --- /dev/null +++ b/cli/testdata/bpmetadata/md/simple-content.md @@ -0,0 +1,29 @@ +# h1 doc title +some content doc title for h1 + +## h2 sub heading +some content sub heading for h2 + +### h3 sub sub heading +some content sub heading for h3 + + + + +### h3 sub sub heading but order 2 +some more content sub heading for h3 + +## Horizontal Rules + +### Deployment Duration +Configuration: 2 mins +Deployment: 10 mins + +### Deployment Duration Only Config +Configuration: 2 mins + +### Deployment Duration Invalid +Configuration or deployment info does not exist as defined + +### Cost +[Solution cost details](https://cloud.google.com/products/calculator?id=02fb0c45-cc29-4567-8cc6-f72ac9024add) diff --git a/cli/testdata/bpmetadata/metadata/existing_interfaces_with_one_connection_metadata.yaml b/cli/testdata/bpmetadata/metadata/existing_interfaces_with_one_connection_metadata.yaml new file mode 100644 index 00000000000..53fbaf86cc2 --- /dev/null +++ b/cli/testdata/bpmetadata/metadata/existing_interfaces_with_one_connection_metadata.yaml @@ -0,0 +1,65 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-memorystore + annotations: + config.kubernetes.io/local-config: "true" +spec: + info: + title: Google Cloud Memorystore Terraform Module + source: + repo: https://github.com/terraform-google-modules/terraform-google-memorystore.git + sourceType: git + version: 10.0.0 + actuationTool: + flavor: Terraform + version: ">= 0.13" + description: {} + content: + 
subBlueprints: + - name: memcache + location: modules/memcache + - name: redis-cluster + location: modules/redis-cluster + examples: + - name: basic + location: examples/basic + - name: memcache + location: examples/memcache + - name: minimal + location: examples/minimal + - name: redis + location: examples/redis + - name: redis-cluster + location: examples/redis-cluster + interfaces: + variables: + - name: auth_enabled + description: Indicates whether OSS Redis AUTH is enabled for the instance. If set to true AUTH is enabled on the instance. + varType: bool + defaultValue: false + - name: authorized_network + description: The full name of the Google Compute Engine network to which the instance is connected. If left unspecified, the default network will be used. + varType: string + connections: + - source: + source: github.com/terraform-google-modules/terraform-google-network//modules/vpc + version: v9.1.0 + spec: + outputExpr: network_name + - name: customer_managed_key + description: Default encryption key to apply to the Redis instance. Defaults to null (Google-managed). 
+ varType: string + requirements: + roles: + - level: Project + roles: + - roles/owner + services: + - cloudresourcemanager.googleapis.com + - serviceusage.googleapis.com + - redis.googleapis.com + - memcache.googleapis.com + - serviceconsumermanagement.googleapis.com + - networkconnectivity.googleapis.com + - compute.googleapis.com \ No newline at end of file diff --git a/cli/testdata/bpmetadata/metadata/existing_interfaces_with_some_connections_metadata.yaml b/cli/testdata/bpmetadata/metadata/existing_interfaces_with_some_connections_metadata.yaml new file mode 100644 index 00000000000..41af1a87828 --- /dev/null +++ b/cli/testdata/bpmetadata/metadata/existing_interfaces_with_some_connections_metadata.yaml @@ -0,0 +1,76 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-memorystore + annotations: + config.kubernetes.io/local-config: "true" +spec: + info: + title: Google Cloud Memorystore Terraform Module + source: + repo: https://github.com/terraform-google-modules/terraform-google-memorystore.git + sourceType: git + version: 10.0.0 + actuationTool: + flavor: Terraform + version: ">= 0.13" + description: {} + content: + subBlueprints: + - name: memcache + location: modules/memcache + - name: redis-cluster + location: modules/redis-cluster + examples: + - name: basic + location: examples/basic + - name: memcache + location: examples/memcache + - name: minimal + location: examples/minimal + - name: redis + location: examples/redis + - name: redis-cluster + location: examples/redis-cluster + interfaces: + variables: + - name: auth_enabled + description: Indicates whether OSS Redis AUTH is enabled for the instance. If set to true AUTH is enabled on the instance. + varType: bool + defaultValue: false + - name: authorized_network + description: The full name of the Google Compute Engine network to which the instance is connected. If left unspecified, the default network will be used. 
+ varType: string + connections: + - source: + source: github.com/terraform-google-modules/terraform-google-network//modules/vpc + version: v9.1.0 + spec: + outputExpr: network_name + - source: + source: github.com/terraform-google-modules/terraform-google-vpc-service-controls//modules/access_level + version: v6.0.0 + spec: + outputExpr: network_acl_name + - name: customer_managed_key + description: Default encryption key to apply to the Redis instance. Defaults to null (Google-managed). + varType: string + connections: + - source: + source: github.com/terraform-google-modules/terraform-google-kms + version: v2.3.0 + spec: + outputExpr: kms_name + requirements: + roles: + - level: Project + roles: + - roles/owner + services: + - cloudresourcemanager.googleapis.com + - serviceusage.googleapis.com + - redis.googleapis.com + - memcache.googleapis.com + - serviceconsumermanagement.googleapis.com + - networkconnectivity.googleapis.com + - compute.googleapis.com \ No newline at end of file diff --git a/cli/testdata/bpmetadata/metadata/existing_interfaces_without_connections_metadata.yaml b/cli/testdata/bpmetadata/metadata/existing_interfaces_without_connections_metadata.yaml new file mode 100644 index 00000000000..cceab1772d7 --- /dev/null +++ b/cli/testdata/bpmetadata/metadata/existing_interfaces_without_connections_metadata.yaml @@ -0,0 +1,59 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-memorystore + annotations: + config.kubernetes.io/local-config: "true" +spec: + info: + title: Google Cloud Memorystore Terraform Module + source: + repo: https://github.com/terraform-google-modules/terraform-google-memorystore.git + sourceType: git + version: 10.0.0 + actuationTool: + flavor: Terraform + version: ">= 0.13" + description: {} + content: + subBlueprints: + - name: memcache + location: modules/memcache + - name: redis-cluster + location: modules/redis-cluster + examples: + - name: basic + location: 
examples/basic + - name: memcache + location: examples/memcache + - name: minimal + location: examples/minimal + - name: redis + location: examples/redis + - name: redis-cluster + location: examples/redis-cluster + interfaces: + variables: + - name: auth_enabled + description: Indicates whether OSS Redis AUTH is enabled for the instance. If set to true AUTH is enabled on the instance. + varType: bool + defaultValue: false + - name: authorized_network + description: The full name of the Google Compute Engine network to which the instance is connected. If left unspecified, the default network will be used. + varType: string + - name: customer_managed_key + description: Default encryption key to apply to the Redis instance. Defaults to null (Google-managed). + varType: string + requirements: + roles: + - level: Project + roles: + - roles/owner + services: + - cloudresourcemanager.googleapis.com + - serviceusage.googleapis.com + - redis.googleapis.com + - memcache.googleapis.com + - serviceconsumermanagement.googleapis.com + - networkconnectivity.googleapis.com + - compute.googleapis.com \ No newline at end of file diff --git a/cli/testdata/bpmetadata/metadata/interfaces_with_full_output_types_metadata.yaml b/cli/testdata/bpmetadata/metadata/interfaces_with_full_output_types_metadata.yaml new file mode 100644 index 00000000000..2359bc181a1 --- /dev/null +++ b/cli/testdata/bpmetadata/metadata/interfaces_with_full_output_types_metadata.yaml @@ -0,0 +1,17 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-network +spec: + interfaces: + outputs: + - name: cluster_id + description: Cluster ID + type: string + - name: endpoint + description: Cluster endpoint + type: + - list + - - object: + - host: string + port: number \ No newline at end of file diff --git a/cli/testdata/bpmetadata/metadata/interfaces_with_new_output_types_metadata.yaml 
b/cli/testdata/bpmetadata/metadata/interfaces_with_new_output_types_metadata.yaml new file mode 100644 index 00000000000..424365c833c --- /dev/null +++ b/cli/testdata/bpmetadata/metadata/interfaces_with_new_output_types_metadata.yaml @@ -0,0 +1,16 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-network +spec: + interfaces: + outputs: + - name: cluster_id + description: Cluster ID + type: string + - name: endpoint + description: Cluster endpoint + type: + - tuple + - string + number \ No newline at end of file diff --git a/cli/testdata/bpmetadata/metadata/interfaces_with_partial_output_types_metadata.yaml b/cli/testdata/bpmetadata/metadata/interfaces_with_partial_output_types_metadata.yaml new file mode 100644 index 00000000000..fc83b3dc8fe --- /dev/null +++ b/cli/testdata/bpmetadata/metadata/interfaces_with_partial_output_types_metadata.yaml @@ -0,0 +1,16 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-network +spec: + interfaces: + outputs: + - name: cluster_id + description: Cluster ID + - name: endpoint + description: Cluster endpoint + type: + - list + - - object: + - host: string + port: number \ No newline at end of file diff --git a/cli/testdata/bpmetadata/metadata/interfaces_without_output_types_metadata.yaml b/cli/testdata/bpmetadata/metadata/interfaces_without_output_types_metadata.yaml new file mode 100644 index 00000000000..35e68206506 --- /dev/null +++ b/cli/testdata/bpmetadata/metadata/interfaces_without_output_types_metadata.yaml @@ -0,0 +1,14 @@ +# interfaces_without_output_types_metadata.yaml +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: sample-module + annotations: + config.kubernetes.io/local-config: "true" +spec: + interfaces: + outputs: + - name: cluster_id + description: Cluster ID + - name: endpoint + description: Cluster endpoint \ No newline at end of file diff 
--git a/cli/testdata/bpmetadata/metadata/new_interfaces_no_connections_metadata.yaml b/cli/testdata/bpmetadata/metadata/new_interfaces_no_connections_metadata.yaml new file mode 100644 index 00000000000..cceab1772d7 --- /dev/null +++ b/cli/testdata/bpmetadata/metadata/new_interfaces_no_connections_metadata.yaml @@ -0,0 +1,59 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-memorystore + annotations: + config.kubernetes.io/local-config: "true" +spec: + info: + title: Google Cloud Memorystore Terraform Module + source: + repo: https://github.com/terraform-google-modules/terraform-google-memorystore.git + sourceType: git + version: 10.0.0 + actuationTool: + flavor: Terraform + version: ">= 0.13" + description: {} + content: + subBlueprints: + - name: memcache + location: modules/memcache + - name: redis-cluster + location: modules/redis-cluster + examples: + - name: basic + location: examples/basic + - name: memcache + location: examples/memcache + - name: minimal + location: examples/minimal + - name: redis + location: examples/redis + - name: redis-cluster + location: examples/redis-cluster + interfaces: + variables: + - name: auth_enabled + description: Indicates whether OSS Redis AUTH is enabled for the instance. If set to true AUTH is enabled on the instance. + varType: bool + defaultValue: false + - name: authorized_network + description: The full name of the Google Compute Engine network to which the instance is connected. If left unspecified, the default network will be used. + varType: string + - name: customer_managed_key + description: Default encryption key to apply to the Redis instance. Defaults to null (Google-managed). 
+ varType: string + requirements: + roles: + - level: Project + roles: + - roles/owner + services: + - cloudresourcemanager.googleapis.com + - serviceusage.googleapis.com + - redis.googleapis.com + - memcache.googleapis.com + - serviceconsumermanagement.googleapis.com + - networkconnectivity.googleapis.com + - compute.googleapis.com \ No newline at end of file diff --git a/cli/testdata/bpmetadata/schema/empty-metadata.yaml b/cli/testdata/bpmetadata/schema/empty-metadata.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cli/testdata/bpmetadata/schema/invalid-metadata-w-enum.yaml b/cli/testdata/bpmetadata/schema/invalid-metadata-w-enum.yaml new file mode 100644 index 00000000000..bf22d109545 --- /dev/null +++ b/cli/testdata/bpmetadata/schema/invalid-metadata-w-enum.yaml @@ -0,0 +1,14 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-module +spec: + info: + title: Terraform Google Module + source: + repo: https://github.com/GoogleCloudPlatform/terraform-google-module.git + sourceType: git + quotaDetails: + - type: BAD_ENUM + quotaType: + MACHINE_TYPE: "n1-standard-8" diff --git a/cli/testdata/bpmetadata/schema/invalid-metadata.yaml b/cli/testdata/bpmetadata/schema/invalid-metadata.yaml new file mode 100644 index 00000000000..afae8314b17 --- /dev/null +++ b/cli/testdata/bpmetadata/schema/invalid-metadata.yaml @@ -0,0 +1,4 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-module diff --git a/cli/testdata/bpmetadata/schema/valid-display-metadata-alternate-defaults.yaml b/cli/testdata/bpmetadata/schema/valid-display-metadata-alternate-defaults.yaml new file mode 100644 index 00000000000..43404b1f506 --- /dev/null +++ b/cli/testdata/bpmetadata/schema/valid-display-metadata-alternate-defaults.yaml @@ -0,0 +1,50 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: 
terraform-google-module +spec: + info: + title: Terraform Google Module + source: + repo: https://github.com/GoogleCloudPlatform/terraform-google-module.git + sourceType: git + ui: + input: + variables: + string_type: + name: string_type + title: String type + altDefaults: + - type: ALTERNATE_TYPE_DC + value: REGIONAL + bool_type: + name: bool_type + title: Bool type + altDefaults: + - type: ALTERNATE_TYPE_DC + value: true + number_type: + name: number_type + title: Number type + altDefaults: + - type: ALTERNATE_TYPE_DC + value: 1 + object_type: + name: object_type + title: Object type + altDefaults: + - type: ALTERNATE_TYPE_DC + value: + key: value + list_type: + name: list_type + title: List type + altDefaults: + - type: ALTERNATE_TYPE_DC + value: + - item1 + - item2 + runtime: + outputs: + output1: + visibility: VISIBILITY_ROOT diff --git a/cli/testdata/bpmetadata/schema/valid-metadata-connections.yaml b/cli/testdata/bpmetadata/schema/valid-metadata-connections.yaml new file mode 100644 index 00000000000..a3cd295fae9 --- /dev/null +++ b/cli/testdata/bpmetadata/schema/valid-metadata-connections.yaml @@ -0,0 +1,45 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-module +spec: + info: + title: Terraform Google Module + source: + repo: https://github.com/GoogleCloudPlatform/terraform-google-module.git + sourceType: git + interfaces: + variables: + - name: foo + connections: + - source: + source: "GoogleCloudPlatform/terraform-google-module1" + version: "~> v1" + spec: + outputExpr: "field1" + inputPath: "nested.field" + - name: bar + connections: + - source: + source: "GoogleCloudPlatform/terraform-google-module1" + version: "~> v1" + spec: + outputExpr: "field1" + - source: + source: "GoogleCloudPlatform/terraform-google-module1" + version: "~> v2" + spec: + outputExpr: "field2" + - name: baz + outputs: + - name: qux + type: [ + "object", + { + "VAR1": "string", + "VAR2": "number" + } + ] + + + diff 
--git a/cli/testdata/bpmetadata/schema/valid-metadata-w-enum.yaml b/cli/testdata/bpmetadata/schema/valid-metadata-w-enum.yaml new file mode 100644 index 00000000000..53194345c8d --- /dev/null +++ b/cli/testdata/bpmetadata/schema/valid-metadata-w-enum.yaml @@ -0,0 +1,14 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-module +spec: + info: + title: Terraform Google Module + source: + repo: https://github.com/GoogleCloudPlatform/terraform-google-module.git + sourceType: git + quotaDetails: + - resourceType: QRT_GCE_INSTANCE + quotaType: + MACHINE_TYPE: "n1-standard-8" diff --git a/cli/testdata/bpmetadata/schema/valid-metadata.yaml b/cli/testdata/bpmetadata/schema/valid-metadata.yaml new file mode 100644 index 00000000000..1df71932122 --- /dev/null +++ b/cli/testdata/bpmetadata/schema/valid-metadata.yaml @@ -0,0 +1,10 @@ +apiVersion: blueprints.cloud.google.com/v1alpha1 +kind: BlueprintMetadata +metadata: + name: terraform-google-module +spec: + info: + title: Terraform Google Module + source: + repo: https://github.com/GoogleCloudPlatform/terraform-google-module.git + sourceType: git diff --git a/cli/testdata/bpmetadata/tf/empty-module/outputs.tf b/cli/testdata/bpmetadata/tf/empty-module/outputs.tf new file mode 100644 index 00000000000..49ae9051078 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/empty-module/outputs.tf @@ -0,0 +1,37 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen/main + +output "cluster_id" { + description = "Cluster ID" +} + +output "endpoint" { + sensitive = true + description = "Cluster endpoint" + value = local.cluster_endpoint + depends_on = [ + /* Nominally, the endpoint is populated as soon as it is known to Terraform. + * However, the cluster may not be in a usable state yet. Therefore any + * resources dependent on the cluster being up will fail to deploy. With + * this explicit dependency, dependent resources can wait for the cluster + * to be up. + */ + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} diff --git a/cli/testdata/bpmetadata/tf/empty-module/variables.tf b/cli/testdata/bpmetadata/tf/empty-module/variables.tf new file mode 100644 index 00000000000..3d92e041dac --- /dev/null +++ b/cli/testdata/bpmetadata/tf/empty-module/variables.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen/main + +variable "" { // Empty variable name + description = "The project ID to host the cluster in" + required = false +} + +variable "description" { + description = "The description of the cluster" + type = string + default = "some description" +} + +variable "regional" { + type = bool + description = "Whether is a regional cluster" + default = true +} diff --git a/cli/testdata/bpmetadata/tf/iam-multi-level.tf b/cli/testdata/bpmetadata/tf/iam-multi-level.tf new file mode 100644 index 00000000000..4a5941b0265 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/iam-multi-level.tf @@ -0,0 +1,16 @@ +locals { + int_required_roles = [ + "roles/cloudsql.admin", + "roles/compute.networkAdmin", + "roles/iam.serviceAccountAdmin", + "roles/resourcemanager.projectIamAdmin", + "roles/storage.admin", + "roles/workflows.admin", + "roles/cloudscheduler.admin", + "roles/iam.serviceAccountUser" + ] + int_required_project_roles = [ + "roles/owner", + "roles/storage.admin" + ] +} diff --git a/cli/testdata/bpmetadata/tf/iam.tf b/cli/testdata/bpmetadata/tf/iam.tf new file mode 100644 index 00000000000..2f88d44842d --- /dev/null +++ b/cli/testdata/bpmetadata/tf/iam.tf @@ -0,0 +1,12 @@ +locals { + int_required_roles = [ + "roles/cloudsql.admin", + "roles/compute.networkAdmin", + "roles/iam.serviceAccountAdmin", + "roles/resourcemanager.projectIamAdmin", + "roles/storage.admin", + "roles/workflows.admin", + "roles/cloudscheduler.admin", + "roles/iam.serviceAccountUser" + ] +} \ No newline at end of file diff --git a/cli/testdata/bpmetadata/tf/invalid-module/outputs.tf b/cli/testdata/bpmetadata/tf/invalid-module/outputs.tf new file mode 100644 index 00000000000..49ae9051078 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/invalid-module/outputs.tf @@ -0,0 +1,37 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance 
with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen/main + +output "cluster_id" { + description = "Cluster ID" +} + +output "endpoint" { + sensitive = true + description = "Cluster endpoint" + value = local.cluster_endpoint + depends_on = [ + /* Nominally, the endpoint is populated as soon as it is known to Terraform. + * However, the cluster may not be in a usable state yet. Therefore any + * resources dependent on the cluster being up will fail to deploy. With + * this explicit dependency, dependent resources can wait for the cluster + * to be up. + */ + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} diff --git a/cli/testdata/bpmetadata/tf/invalid-module/variables.tf b/cli/testdata/bpmetadata/tf/invalid-module/variables.tf new file mode 100644 index 00000000000..f0fc8257b26 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/invalid-module/variables.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen/main + +variable { // No variable name + description = "The project ID to host the cluster in" + required = false +} + +variable "description" { + description = "The description of the cluster" + type = string + default = "some description" +} + +variable "regional" { + type = bool + description = "Whether is a regional cluster" + default = true +} diff --git a/cli/testdata/bpmetadata/tf/main.tf b/cli/testdata/bpmetadata/tf/main.tf new file mode 100644 index 00000000000..4f66d58ec8d --- /dev/null +++ b/cli/testdata/bpmetadata/tf/main.tf @@ -0,0 +1,44 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +module "gke-project-1" { + source = "terraform-google-modules/project-factory/google" + version = "~> 13.0" + + activate_apis = [ + "cloudkms.googleapis.com", + "cloudresourcemanager.googleapis.com", + "container.googleapis.com", + "pubsub.googleapis.com", + "serviceusage.googleapis.com", + "storage-api.googleapis.com", + "anthos.googleapis.com", + "anthosconfigmanagement.googleapis.com", + "logging.googleapis.com", + "meshca.googleapis.com", + "meshtelemetry.googleapis.com", + "meshconfig.googleapis.com", + "cloudresourcemanager.googleapis.com", + "monitoring.googleapis.com", + "stackdriver.googleapis.com", + "cloudtrace.googleapis.com", + "meshca.googleapis.com", + "iamcredentials.googleapis.com", + "gkeconnect.googleapis.com", + "privateca.googleapis.com", + "gkehub.googleapis.com" + ] +} diff --git a/cli/testdata/bpmetadata/tf/provider-versions-bad.tf b/cli/testdata/bpmetadata/tf/provider-versions-bad.tf new file mode 100644 index 00000000000..787414dc861 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/provider-versions-bad.tf @@ -0,0 +1,19 @@ +terraform { + required_version = ">= 0.13.0" + + required_providers { + google = { + version = ">= 4.4.0, < 7" + } + google-beta = { + source = "hashicorp/google-beta" + } + } + + provider_meta "google" { + module_name = "blueprints/terraform/terraform-google-kubernetes-engine:hub/v23.1.0" + } + provider_meta "google-beta" { + module_name = "blueprints/terraform/terraform-google-kubernetes-engine:hub/v23.1.0" + } +} diff --git a/cli/testdata/bpmetadata/tf/provider-versions-empty.tf b/cli/testdata/bpmetadata/tf/provider-versions-empty.tf new file mode 100644 index 00000000000..6e23b30fe84 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/provider-versions-empty.tf @@ -0,0 +1,13 @@ +terraform { + required_version = ">= 0.13.0" + + required_providers { + } + + provider_meta "google" { + module_name = "blueprints/terraform/terraform-google-kubernetes-engine:hub/v23.1.0" + } + provider_meta "google-beta" { + module_name = 
"blueprints/terraform/terraform-google-kubernetes-engine:hub/v23.1.0" + } +} diff --git a/cli/testdata/bpmetadata/tf/sample-module/outputs.tf b/cli/testdata/bpmetadata/tf/sample-module/outputs.tf new file mode 100644 index 00000000000..49ae9051078 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/sample-module/outputs.tf @@ -0,0 +1,37 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen/main + +output "cluster_id" { + description = "Cluster ID" +} + +output "endpoint" { + sensitive = true + description = "Cluster endpoint" + value = local.cluster_endpoint + depends_on = [ + /* Nominally, the endpoint is populated as soon as it is known to Terraform. + * However, the cluster may not be in a usable state yet. Therefore any + * resources dependent on the cluster being up will fail to deploy. With + * this explicit dependency, dependent resources can wait for the cluster + * to be up. 
+ */ + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} diff --git a/cli/testdata/bpmetadata/tf/sample-module/terraform.tfstate b/cli/testdata/bpmetadata/tf/sample-module/terraform.tfstate new file mode 100644 index 00000000000..1d19d377e55 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/sample-module/terraform.tfstate @@ -0,0 +1,25 @@ +{ + "format_version": "1.0", + "terraform_version": "1.2.0", + "values": { + "outputs": { + "cluster_id": { + "type": "string", + "value": "sample-cluster-id" + }, + "endpoint": { + "type": [ + "object", + { + "host": "string", + "port": "number" + } + ], + "value": { + "host": "127.0.0.1", + "port": 443 + } + } + } + } +} \ No newline at end of file diff --git a/cli/testdata/bpmetadata/tf/sample-module/variables.tf b/cli/testdata/bpmetadata/tf/sample-module/variables.tf new file mode 100644 index 00000000000..acef73a6227 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/sample-module/variables.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen/main + +variable "project_id" { + description = "The project ID to host the cluster in" + required = false +} + +variable "description" { + description = "The description of the cluster" + type = string + default = "some description" +} + +variable "regional" { + type = bool + description = "Whether is a regional cluster" + default = true +} diff --git a/cli/testdata/bpmetadata/tf/versions-bad-all.tf b/cli/testdata/bpmetadata/tf/versions-bad-all.tf new file mode 100644 index 00000000000..d997e93c09f --- /dev/null +++ b/cli/testdata/bpmetadata/tf/versions-bad-all.tf @@ -0,0 +1,7 @@ +terraform { + required_version_not_good = ">= 0.13.0" + + provider_meta "google" { + module_name = "blueprints/terraform/terraform-google-kubernetes-engine:hub/23.1.0" + } +} diff --git a/cli/testdata/bpmetadata/tf/versions-bad-core.tf b/cli/testdata/bpmetadata/tf/versions-bad-core.tf new file mode 100644 index 00000000000..cd4e612ce09 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/versions-bad-core.tf @@ -0,0 +1,7 @@ +terraform { + required_version_not_good = ">= 0.13.0" + + provider_meta "google" { + module_name = "blueprints/terraform/terraform-google-kubernetes-engine:hub/v23.1.0" + } +} diff --git a/cli/testdata/bpmetadata/tf/versions-bad-module.tf b/cli/testdata/bpmetadata/tf/versions-bad-module.tf new file mode 100644 index 00000000000..19bb714a8e0 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/versions-bad-module.tf @@ -0,0 +1,7 @@ +terraform { + required_version = ">= 0.13.0" + + provider_meta "google" { + module_name = "blueprints/terraform/terraform-google-kubernetes-engine:hub/23.1.0" + } +} diff --git a/cli/testdata/bpmetadata/tf/versions-beta.tf b/cli/testdata/bpmetadata/tf/versions-beta.tf new file mode 100644 index 00000000000..c505ed8375c --- /dev/null +++ b/cli/testdata/bpmetadata/tf/versions-beta.tf @@ -0,0 +1,21 @@ +terraform { + required_version = ">= 0.13.0" + + required_providers { + 
google = { + source = "hashicorp/google" + version = ">= 4.4.0, < 7" + } + google-beta = { + source = "hashicorp/google-beta" + version = ">= 4.4.0, < 7" + } + } + + provider_meta "google" { + module_name = "blueprints/terraform/terraform-google-kubernetes-engine:hub/v23.1.0" + } + provider_meta "google-beta" { + module_name = "blueprints/terraform/terraform-google-kubernetes-engine:hub/v23.1.0" + } +} diff --git a/cli/testdata/bpmetadata/tf/versions-core.tf b/cli/testdata/bpmetadata/tf/versions-core.tf new file mode 100644 index 00000000000..450c502a03e --- /dev/null +++ b/cli/testdata/bpmetadata/tf/versions-core.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.13.0" +} diff --git a/cli/testdata/bpmetadata/tf/versions-module.tf b/cli/testdata/bpmetadata/tf/versions-module.tf new file mode 100644 index 00000000000..a37cde08d71 --- /dev/null +++ b/cli/testdata/bpmetadata/tf/versions-module.tf @@ -0,0 +1,5 @@ +terraform { + provider_meta "google" { + module_name = "blueprints/terraform/terraform-google-kubernetes-engine:hub/v23.1.0" + } +} diff --git a/cli/testdata/bpmetadata/tf/versions.tf b/cli/testdata/bpmetadata/tf/versions.tf new file mode 100644 index 00000000000..517b35c318d --- /dev/null +++ b/cli/testdata/bpmetadata/tf/versions.tf @@ -0,0 +1,7 @@ +terraform { + required_version = ">= 0.13.0" + + provider_meta "google" { + module_name = "blueprints/terraform/terraform-google-kubernetes-engine:hub/v23.1.0" + } +} diff --git a/cli/testdata/catalog/csv-verbose.expected b/cli/testdata/catalog/csv-verbose.expected new file mode 100644 index 00000000000..ffa47e0348e --- /dev/null +++ b/cli/testdata/catalog/csv-verbose.expected @@ -0,0 +1,4 @@ +Repo,Stars,Created,Description +terraform-google-bar,5,2021-01-03,lorem ipsom +terraform-google-foo,10,2022-11-03, +terraform-foo,10,2022-11-03, diff --git a/cli/testdata/catalog/csv.expected b/cli/testdata/catalog/csv.expected new file mode 100644 index 00000000000..5069f2db665 --- /dev/null +++ 
b/cli/testdata/catalog/csv.expected @@ -0,0 +1,4 @@ +Repo,Stars,Created +terraform-google-bar,5,2021-01-03 +terraform-google-foo,10,2022-11-03 +terraform-foo,10,2022-11-03 diff --git a/cli/testdata/catalog/html.expected b/cli/testdata/catalog/html.expected new file mode 100644 index 00000000000..80f30889dc1 --- /dev/null +++ b/cli/testdata/catalog/html.expected @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CategoryBlueprintDescription
End to end, Data analyticsai-notebookProtect confidential data in Vertex AI Workbench notebooks
End to endfabricAdvanced examples designed for prototyping
Containers, End-to-endfoo
\ No newline at end of file diff --git a/cli/testdata/catalog/single-html.expected b/cli/testdata/catalog/single-html.expected new file mode 100644 index 00000000000..9712ecd219e --- /dev/null +++ b/cli/testdata/catalog/single-html.expected @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + +
CategoryBlueprintDescription
Containersbarlorem ipsom
\ No newline at end of file diff --git a/cli/testdata/catalog/table.expected b/cli/testdata/catalog/table.expected new file mode 100644 index 00000000000..fbb0ae0b1d0 --- /dev/null +++ b/cli/testdata/catalog/table.expected @@ -0,0 +1,7 @@ ++----------------------+-------+------------+ +| REPO | STARS | CREATED | ++----------------------+-------+------------+ +| terraform-google-bar | 5 | 2021-01-03 | +| terraform-google-foo | 10 | 2022-11-03 | +| terraform-foo | 10 | 2022-11-03 | ++----------------------+-------+------------+ diff --git a/cli/tests/create-update-test.bats b/cli/tests/create-update-test.bats index 0cacf8b0599..e3992d48cbd 100644 --- a/cli/tests/create-update-test.bats +++ b/cli/tests/create-update-test.bats @@ -73,4 +73,4 @@ function teardown() { run gcloud compute networks subnets list --project "${CLOUD_FOUNDATION_PROJECT_ID}" [[ ! "$output" =~ "cftcli-test-subnetwork-1" ]] [[ ! "$output" =~ "cftcli-test-subnetwork-2" ]] -} \ No newline at end of file +} diff --git a/cli/util/file.go b/cli/util/file.go new file mode 100644 index 00000000000..26b6ae3b6ec --- /dev/null +++ b/cli/util/file.go @@ -0,0 +1,106 @@ +package util + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "regexp" + "strings" +) + +const ( + tfInternalDirPrefix = ".terraform" +) + +// skipDiscoverDirs are directories that are skipped when discovering test cases. 
+var skipDiscoverDirs = map[string]bool{
+	"test":  true,
+	"build": true,
+	".git":  true,
+}
+
+// WalkTerraformDirs traverses a provided path to return a list of directories
+// that hold terraform configs while skipping internal folders that have a
+// .terraform.* prefix
+func WalkTerraformDirs(topLevelPath string) ([]string, error) {
+	var tfDirs []string
+	err := filepath.Walk(topLevelPath, func(path string, info fs.FileInfo, err error) error {
+		if err != nil {
+			return fmt.Errorf("failure in accessing the path %q: %w\n", path, err)
+		}
+		if info.IsDir() && (strings.HasPrefix(info.Name(), tfInternalDirPrefix) || skipDiscoverDirs[info.Name()]) {
+			return filepath.SkipDir
+		}
+
+		if !info.IsDir() && strings.HasSuffix(info.Name(), ".tf") {
+			tfDirs = append(tfDirs, filepath.Dir(path))
+			return filepath.SkipDir
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, fmt.Errorf("error walking the path %q: %w\n", topLevelPath, err)
+	}
+
+	return tfDirs, nil
+}
+
+func FindFilesWithPattern(dir string, pattern string, skipPaths []string) ([]string, error) {
+	f, err := os.Stat(dir)
+	if err != nil {
+		return nil, fmt.Errorf("no such dir: %w", err)
+	}
+	if !f.IsDir() {
+		return nil, fmt.Errorf("expected dir %s: got file", dir)
+	}
+
+	re, err := regexp.Compile(pattern)
+	if err != nil {
+		return nil, fmt.Errorf("invalid regex: %w", err)
+	}
+
+	filePaths := []string{}
+
+	err = filepath.WalkDir(dir, func(path string, file fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if !re.MatchString(filepath.Base(path)) {
+			return nil
+		}
+
+		for _, p := range skipPaths {
+			if strings.Contains(path, p) {
+				return nil
+			}
+		}
+
+		if !file.IsDir() {
+			filePaths = append(filePaths, path)
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		fmt.Printf("error accessing the path: %q. 
error: %v\n", dir, err) + return nil, err + } + + return filePaths, nil +} + +func Exists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if errors.Is(err, os.ErrNotExist) { + return false, nil + } + return false, fmt.Errorf("error checking if %s exists: %w", path, err) +} diff --git a/cli/util/file_test.go b/cli/util/file_test.go new file mode 100644 index 00000000000..f170246e99b --- /dev/null +++ b/cli/util/file_test.go @@ -0,0 +1,139 @@ +package util + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + testContentPath = "../testdata/bpmetadata" +) + +func TestTFDirectories(t *testing.T) { + tests := []struct { + name string + path string + want []string + wantErr bool + }{ + { + name: "multiple directories", + path: "content/examples", + want: []string{ + "../testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part1/terraform", + "../testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part2", + "../testdata/bpmetadata/content/examples/simple_regional", + "../testdata/bpmetadata/content/examples/simple_regional_beta", + }, + }, + { + name: "single directory", + path: "content/examples/simple_regional_beta", + want: []string{ + "../testdata/bpmetadata/content/examples/simple_regional_beta", + }, + }, + { + name: "single directory", + path: "content/no_directory", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + path := path.Join(testContentPath, tt.path) + got, err := WalkTerraformDirs(path) + if (err != nil) != tt.wantErr { + t.Errorf("WalkTerraformDirs() error = %v, wantErr %v", err, tt.wantErr) + return + } + + assert.Equal(t, got, tt.want) + }) + } +} + +func TestFindFilesWithPattern(t *testing.T) { + tests := []struct { + name string + path string + pattern string + skipPaths []string + want []string + wantErr bool + }{ + { + name: "pattern for metadata files", + path: "", + pattern: 
`^metadata(?:.display)?.yaml$`, + want: []string{ + "../testdata/bpmetadata/content/examples/acm/acm-terraform-blog-part1/terraform/metadata.yaml", + "../testdata/bpmetadata/content/examples/acm/metadata.display.yaml", + "../testdata/bpmetadata/content/examples/acm/metadata.yaml", + "../testdata/bpmetadata/content/examples/simple_regional/metadata.yaml", + }, + }, + { + name: "pattern for tf files", + path: "content/examples/simple_regional", + pattern: `.+.tf$`, + want: []string{ + "../testdata/bpmetadata/content/examples/simple_regional/main.tf", + "../testdata/bpmetadata/content/examples/simple_regional/modules/submodule-01/main.tf", + }, + }, + { + name: "pattern for tf files skipping a path", + path: "content/examples", + skipPaths: []string{ + "examples/acm", + }, + pattern: `.+.tf$`, + want: []string{ + "../testdata/bpmetadata/content/examples/simple_regional/main.tf", + "../testdata/bpmetadata/content/examples/simple_regional/modules/submodule-01/main.tf", + "../testdata/bpmetadata/content/examples/simple_regional_beta/main.tf", + "../testdata/bpmetadata/content/examples/simple_regional_beta/variables.tf", + }, + }, + { + name: "pattern for tf files skipping multiple paths", + path: "content/examples", + skipPaths: []string{ + "examples/acm", + "examples/simple_regional_beta", + }, + pattern: `.+.tf$`, + want: []string{ + "../testdata/bpmetadata/content/examples/simple_regional/main.tf", + "../testdata/bpmetadata/content/examples/simple_regional/modules/submodule-01/main.tf", + }, + }, + { + name: "pattern for avoiding non-metadata yaml files", + path: "schema", + pattern: `^metadata(?:.display)?.yaml$`, + want: []string{}, + }, + { + name: "invalid pattern", + pattern: `*.txt`, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + path := path.Join(testContentPath, tt.path) + got, err := FindFilesWithPattern(path, tt.pattern, tt.skipPaths) + if (err != nil) != tt.wantErr { + t.Errorf("FindFilesWithPattern() error 
= %v, wantErr %v", err, tt.wantErr) + return + } + + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/cli/util/git.go b/cli/util/git.go new file mode 100644 index 00000000000..728ecf70c47 --- /dev/null +++ b/cli/util/git.go @@ -0,0 +1,71 @@ +package util + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + git "github.com/go-git/go-git/v5" +) + +const defaultRemote = "origin" + +var githubSSHRemoteRegex = regexp.MustCompile(`git@github\.com:`) + +// getRepoName finds upstream repo name from a given repo directory +func GetRepoName(repoUrl string) (string, error) { + if repoUrl == "" { + return "", fmt.Errorf("empty URL") + } + + u, err := url.Parse(repoUrl) + if err != nil { + return "", fmt.Errorf("malformed repo URL: %w", err) + } + + trimmedRemotePath := strings.TrimSuffix(u.Path, "/") + i := strings.LastIndex(trimmedRemotePath, "/") + repoName := strings.TrimSuffix(trimmedRemotePath[i+1:], ".git") + + return repoName, nil +} + +// GetRepoUrlAndRootPath finds upstream repo URL and the root local path +func GetRepoUrlAndRootPath(dir string) (string, string, error) { + opt := &git.PlainOpenOptions{DetectDotGit: true} + r, err := git.PlainOpenWithOptions(dir, opt) + if err != nil { + return "", "", fmt.Errorf("error opening git dir %s: %w", dir, err) + } + + repoRootPath := "" + repoURL := "" + rm, err := r.Remote(defaultRemote) + if err != nil { + return repoURL, repoRootPath, fmt.Errorf("error finding remote %s in git dir %s: %w", defaultRemote, dir, err) + } + + if len(rm.Config().URLs) > 0 { + repoURL = resolveRemoteGitHubSSHURLToHTTPS(rm.Config().URLs[0]) + } + + if repoURL == "" { + return repoURL, repoRootPath, fmt.Errorf("no remote urls") + } + + w, err := r.Worktree() + if err != nil { + return repoURL, repoRootPath, fmt.Errorf("unable to parse worktree for git: %w", err) + } + repoRootPath = w.Filesystem.Root() + return repoURL, repoRootPath, nil +} + +func resolveRemoteGitHubSSHURLToHTTPS(URL string) string { + if 
!githubSSHRemoteRegex.MatchString(URL) { + return URL + } + + return githubSSHRemoteRegex.ReplaceAllString(URL, "https://github.com/") +} diff --git a/cli/util/git_test.go b/cli/util/git_test.go new file mode 100644 index 00000000000..4cedbc25799 --- /dev/null +++ b/cli/util/git_test.go @@ -0,0 +1,175 @@ +package util + +import ( + "os" + "path" + "strings" + "testing" + + git "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/config" +) + +func TestGetRepoUrlAndRootPath(t *testing.T) { + tests := []struct { + name string + repo string + subDir string + remote string + wantURL string + wantErr bool + }{ + { + name: "simple", + repo: "https://github.com/foo/bar", + remote: defaultRemote, + wantURL: "https://github.com/foo/bar", + }, + { + name: "simple trailing", + repo: "https://gitlab.com/foo/bar/", + remote: defaultRemote, + wantURL: "https://gitlab.com/foo/bar/", + }, + { + name: "no scheme", + repo: "github.com/foo/bar", + remote: defaultRemote, + wantURL: "github.com/foo/bar", + }, + { + name: "invalid remote", + repo: "github.com/foo/bar", + remote: "foo", + wantErr: true, + }, + { + name: "simple w/ module sub directory", + repo: "https://github.com/foo/bar", + subDir: "modules/bp1", + remote: defaultRemote, + wantURL: "https://github.com/foo/bar", + }, + { + name: "simple w/ ssh remote", + repo: "git@github.com:foo/bar.git", + remote: defaultRemote, + wantURL: "https://github.com/foo/bar.git", + }, + { + name: "non git@github.com ssh remote", + repo: "git@githubAcom:foo/bar.git", + remote: defaultRemote, + wantURL: "git@githubAcom:foo/bar.git", + }, + { + name: "simple w/ module sub directory w/ ssh remote", + repo: "git@github.com:foo/bar.git", + remote: defaultRemote, + subDir: "modules/bp1", + wantURL: "https://github.com/foo/bar.git", + }, + { + name: "gitlab repo url should not be modified", + repo: "git@gitlab.com:foo/bar.git", + remote: defaultRemote, + wantURL: "git@gitlab.com:foo/bar.git", + }, + { + name: "empty repo url", + repo: "", + 
remote: defaultRemote, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := tempGitRepoWithRemote(t, tt.repo, tt.remote, tt.subDir) + gotURL, gotPath, err := GetRepoUrlAndRootPath(dir) + if (err != nil) != tt.wantErr { + t.Errorf("GetRepoUrlAndRootPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if gotURL != tt.wantURL { + t.Errorf("URL - GetRepoUrlAndRootPath() = %v, want %v", gotURL, tt.wantURL) + } + + wantPath := strings.TrimSuffix(strings.ReplaceAll(dir, tt.subDir, ""), "/") + if tt.wantErr { + wantPath = "" + } + + if gotPath != wantPath { + t.Errorf("RootPath - GetRepoUrlAndRootPath() = %v, want %v", gotPath, wantPath) + } + }) + } +} + +func TestGetRepoNameFromUrl(t *testing.T) { + tests := []struct { + name string + repoUrl string + want string + wantErr bool + }{ + { + name: "simple", + repoUrl: "https://github.com/foo/bar", + want: "bar", + }, + { + name: "no scheme", + repoUrl: "github.com/foo/bar", + want: "bar", + }, + { + name: "gerrit repo", + repoUrl: "sso://team/foo/bar", + want: "bar", + }, + { + name: "empty Url", + repoUrl: "", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := GetRepoName(tt.repoUrl) + if (err != nil) != tt.wantErr { + t.Errorf("getRepoName() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("getRepoName() = %v, want %v", got, tt.want) + } + }) + } +} + +func tempGitRepoWithRemote(t *testing.T, repoURL, remote string, subDir string) string { + t.Helper() + dir := t.TempDir() + if subDir != "" { + err := os.MkdirAll(path.Join(dir, subDir), 0755) + if err != nil { + t.Fatalf("Error sub dir for temp git repo: %v", err) + } + } + + r, err := git.PlainInit(dir, false) + if err != nil { + t.Fatalf("Error creating git repo in tempdir: %v", err) + } + _, err = r.CreateRemote(&config.RemoteConfig{ + Name: remote, + URLs: []string{repoURL}, + }) + if err != nil { + 
t.Fatalf("Error creating remote in tempdir repo: %v", err) + } + + return path.Join(dir, subDir) +} diff --git a/cli/util/prompt.go b/cli/util/prompt.go new file mode 100644 index 00000000000..73e0ad30c85 --- /dev/null +++ b/cli/util/prompt.go @@ -0,0 +1,28 @@ +package util + +import ( + "fmt" + "os" + "strings" + + "github.com/manifoldco/promptui" +) + +// PromptSelect prompts a user to select a value from given items. +func PromptSelect(label string, items []string) string { + prompt := promptui.Select{ + Label: label, + Items: items, + Searcher: func(input string, index int) bool { + return strings.Contains(items[index], input) + }, + StartInSearchMode: true, + } + _, result, err := prompt.Run() + if err != nil { + fmt.Printf("Prompt failed %v\n", err) + os.Exit(1) + } + fmt.Printf("Selected: %s\n", result) + return result +} diff --git a/config-connector/solutions/README.md b/config-connector/solutions/README.md deleted file mode 100644 index ce575c7f99c..00000000000 --- a/config-connector/solutions/README.md +++ /dev/null @@ -1,102 +0,0 @@ -# Config Connector Solutions - -## Overview - -Config Connector Solutions provides best practice solutions -to common cloud applications, formatted as YAML definitions -for Config Connector CRDs. These YAMLs can be applied to -clusters running [Config -Connector](https://cloud.google.com/config-connector/docs/how-to/getting-started). - -## Structure - -Folders under this directory denote general solution areas. -In each solution area folder, there are folders for each package -& customization tool (currently helm and kpt), under which are nested all available solutions in -that solution area and package format. 
- -## Solutions - -The full list of solutions grouped by area: - -* **apps** - automate creation of a canonical sample application and provision required GCP services with Config Connector - * wordpress [ [helm](apps/helm/wordpress) ] - provision Wordpress application powered by GCP MySQL database -* **projects** - automate creation of GCP projects, folders and project services - using Config Connector - * owned-project [ [kpt](projects/kpt/owned-project) ] - grant the project - owner role - * project-hierarcy [ [kpt](projects/kpt/project-hierarchy) ] - get started - with a folder and a project - * project-services [ [kpt](projects/kpt/project-services) ] - enable GCP API - for a project - * shared-vpc [ [kpt](projects/kpt/shared-vpc) ] - create a shared VPC network - * simple-project [ [kpt](projects/kpt/simple-project) ] - get started with a - simple project -* **iam** - automate the management of IAM roles for resources using Config - Connector - * folder-iam [ [kpt](iam/kpt/folder-iam) ] - grant an IAM role to a GCP folder - * kms-crypto-key [ [kpt](iam/kpt/kms-crypto-key) ] - grant an IAM role to a - KMS crypto key - * kms-key-ring [ [kpt](iam/kpt/kms-key-ring) ] - grant an IAM role to a KMS - key ring - * member-iam [ [kpt](iam/kpt/member-iam) ] - grant a service account an IAM - role to a project - * project-iam [ [kpt](iam/kpt/project-iam) ] - grant an IAM role to a project - * pubsub-subscription [ [kpt](iam/kpt/pubsub-subscription) ] - grant an IAM - role to a Pub/Sub subscription - * pubsub-topic [ [kpt](iam/kpt/pubsub-topic) ] - grant an IAM role to a - Pub/Sub topic - * service-account [ [helm](iam/helm/service-account) ] \[ [kpt]( - iam/kpt/service-account) ] - grant an IAM role to a service account - * storage-bucket-iam [ [kpt](iam/kpt/storage-bucket-iam) ] - grant an IAM role - to a storage bucket - * subnet [ [kpt](iam/kpt/subnetp) ] - grant an IAM role to a subnetwork -* **sql** - automate the creation of Cloud SQL instances, databases, and users 
- using Config Connector - * mysql-ha [ [kpt](sql/kpt/mysql-ha) ] - create a high availability MySQL - cluster - * mysql-private [ [kpt](sql/kpt/mysql-private) ] - create a private MySQL - database - * mysql-public [ [kpt](sql/kpt/mysql-public) ] - create a public MySQL - database - * postgres-ha [ [kpt](sql/kpt/postgres-ha) ] - create a high availability - PostgreSQL cluster - * postgres-public [ [kpt](sql/kpt/postgres-public) ] - create a public - PostgreSQL database - - -## Usage - -### helm - -These solutions are consumable as [helm charts](https://helm.sh/docs/topics/charts/). -Common targets for modification are listed in `values.yaml`. - -[Install helm](https://helm.sh/docs/intro/install/). These solutions support Helm v.3+. - -Common operations, where `PATH` is the path to the relevant solution folder: -* Showing values: `helm show values PATH` -* Validating chart: `helm template PATH` -* Setting chart: `helm install PATH -generate-name` - -Comprehensive documentation at -[https://helm.sh/docs/](https://helm.sh/docs/). - -### kpt - -These samples are consumable as [kpt -packages](https://googlecontainertools.github.io/kpt/). -Common targets for modification are provided kpt setters, -and can be listed with `kpt cfg list-setters`. - -* Installing kpt: follow the instructions on [the kpt -GitHub](https://github.com/GoogleContainerTools/kpt). -* Listing setters: See which values are available for kpt to change `kpt cfg list-setters` -* Setting setters: `kpt cfg set DIR NAME VALUE --set-by NAME` - -Comprehensive documentation at -[https://googlecontainertools.github.io/kpt/](https://googlecontainertools.github.io/kpt/). - -## License - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/apps/helm/wordpress/README.md b/config-connector/solutions/apps/helm/wordpress/README.md deleted file mode 100644 index 3777aaeefc9..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# Wordpress - -================================================== - -## NAME - - wordpress - -## SYNOPSIS - -The WordPress application demonstrates how you can configure a WordPress site powered by GCP MySQL database and using Workload Identity for authentication. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the wordpress folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/apps/helm/wordpress - ``` - -## REQUIREMENTS - -1. GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). -1. [Helm](../../../README.md#helm) -1. Cloud Resource Manager API needs to be enabled on the project to use [ServiceUsage Resource](https://cloud.google.com/config-connector/docs/reference/resources#service). You can enable it by running: - - ```bash - gcloud services enable cloudresourcemanager.googleapis.com --project [PROJECT_ID] - ``` - -## USAGE - -All steps are run from this directory. - -1. Review and update the values in `./charts/wordpress-gcp/values.yaml`. - - **Note:** Please ensure the value of `database.instanceName` (defaults to `wp-db`) is unique and hasn't been used as an SQL instance name in the last 7 days. -1. Validate and install the sample with Helm. 
- - ```bash - # validate your chart - helm lint ./charts/wordpress-gcp/ --set google.projectId=[PROJECT_ID] - - # check the output of your chart - helm template ./charts/wordpress-gcp/ --set google.projectId=[PROJECT_ID] - - # install your chart - helm install ./charts/wordpress-gcp/ --set google.projectId=[PROJECT_ID] --generate-name - ``` - -1. The wordpress creation can take up to 10-15 minutes. Throughout the process you can check the status of various components: - - ```bash - # check the status of sqlinstance - kubectl describe sqlinstance [VALUE of database.instanceName] - # check the status of wordpress pod (the output should show that both containers are ready) - kubectl get pods wordpress-0 - ``` - - **Note:** If the pods can't be scheduled because of `Insufficient CPU` issue, please increase the size of nodes in your cluster. - Once the pods are ready, obtain the external IP address of your WordPress application by checking: - - ```bash - kubectl get svc wordpress-external - ``` - - Navigate to this address and validate that you see WordPress installation page. - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. Note that can take a few minutes before all K8s resources are fully deleted. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. \ No newline at end of file diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/.helmignore b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/.helmignore deleted file mode 100644 index 3eaebeb0931..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/.helmignore +++ /dev/null @@ -1,22 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). 
Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode \ No newline at end of file diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/Chart.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/Chart.yaml deleted file mode 100644 index 6309ea1cdf8..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/Chart.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -appVersion: "1.0" -description: A Helm chart to deploy WordPress powered by Google Cloud SQL MySQL database and Config Connector to Kubernetes -name: wordpress-gcp -version: 0.1.0 -icon: https://s1.wp.com/i/webclip.png diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-api.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-api.yaml deleted file mode 100644 index 6fedc9c29aa..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-api.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1 -kind: Service -metadata: - annotations: - cnrm.cloud.google.com/deletion-policy: abandon - cnrm.cloud.google.com/disable-dependent-services: "false" - name: sqladmin.googleapis.com diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-db.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-db.yaml deleted file mode 100644 index bccc574b9b4..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-db.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: {{ required "dbName is required!" 
.Values.database.dbName }} -spec: - charset: utf8 - instanceRef: - name: {{ required "instanceName is required!" .Values.database.instanceName }} \ No newline at end of file diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-instance.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-instance.yaml deleted file mode 100644 index 22f70343dcf..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-instance.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: {{ required "instanceName is required!" .Values.database.instanceName }} -spec: - databaseVersion: {{ required "version is required!" .Values.database.version }} - region: {{ required "region is required!" .Values.google.region }} - settings: - tier: {{ required "tier is required!" .Values.database.tier }} diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-policy-member.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-policy-member.yaml deleted file mode 100644 index 450cd0103e9..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-policy-member.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: sql-wp-sa-project-policymember -spec: - member: serviceAccount:sql-wp-sa@{{ required "projectId is required!" .Values.google.projectId }}.iam.gserviceaccount.com - role: roles/cloudsql.client - resourceRef: - kind: Project - apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 - external: {{ required "projectId is required!" 
.Values.google.projectId }} diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-service-account.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-service-account.yaml deleted file mode 100644 index 37e151dc1ca..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-service-account.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMServiceAccount -metadata: - name: sql-wp-sa -spec: - displayName: Service Account for WordPress Config Connector Sample diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-user.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-user.yaml deleted file mode 100644 index 160b3f9fcac..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-sql-user.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: {{ required "user is required!" .Values.database.user }} -spec: - instanceRef: - name: {{ required "instanceName is required!" 
.Values.database.instanceName }} - host: "%" - password: - valueFrom: - secretKeyRef: - name: wordpress-cloudsql-db-credentials - key: password diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-wi-policy.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-wi-policy.yaml deleted file mode 100644 index c7f84e6081a..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/gcp-wi-policy.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicy -metadata: - name: sql-wp-sa-wi-policy -spec: - resourceRef: - apiVersion: iam.cnrm.cloud.google.com/v1beta1 - kind: IAMServiceAccount - name: sql-wp-sa - bindings: - - role: roles/iam.workloadIdentityUser - members: - - serviceAccount:{{ required "projectId is required!" .Values.google.projectId }}.svc.id.goog[{{ required "Namespace is requried!" .Release.Namespace }}/sql-wp-ksa-wi] diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-external-load-balancer.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-external-load-balancer.yaml deleted file mode 100644 index 724987d7768..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-external-load-balancer.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: wordpress-external - labels: - app: wordpress -spec: - type: LoadBalancer - ports: - - port: 80 - name: web - targetPort: 80 - protocol: TCP - selector: - app: wordpress diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-service-account.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-service-account.yaml deleted file mode 100644 index 024a0d41408..00000000000 --- 
a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-service-account.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: sql-wp-ksa-wi - annotations: - iam.gke.io/gcp-service-account: sql-wp-sa@{{ required "projectId is required!" .Values.google.projectId }}.iam.gserviceaccount.com diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-sql-db-credentials.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-sql-db-credentials.yaml deleted file mode 100644 index 5d80430b467..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-sql-db-credentials.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: wordpress-cloudsql-db-credentials -stringData: - projectId: {{ required "projectId is required!" .Values.google.projectId }} - username: {{ required "user is required!" .Values.database.user }} - password: {{ required "password is required!" .Values.database.password }} - connectionName: {{ required "region is required!" .Values.google.region }}:{{ required "instanceName is required!" 
.Values.database.instanceName }} diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-stateful-set.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-stateful-set.yaml deleted file mode 100644 index e1fd3ca86df..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/templates/k8s-stateful-set.yaml +++ /dev/null @@ -1,86 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: wordpress -spec: - selector: - matchLabels: - app: wordpress - serviceName: "wordpress" - replicas: 1 - template: - metadata: - labels: - app: wordpress - spec: - terminationGracePeriodSeconds: 30 - serviceAccountName: sql-wp-ksa-wi - containers: - - name: wordpress - resources: -{{ toYaml .Values.wordpress.resources | indent 10 }} - image: {{ required "image is required!" .Values.wordpress.image}} - imagePullPolicy: IfNotPresent - env: - - name: WORDPRESS_DB_HOST - value: 127.0.0.1:3306 - - name: WORDPRESS_DB_USER - valueFrom: - secretKeyRef: - name: wordpress-cloudsql-db-credentials - key: username - - name: WORDPRESS_DB_PASSWORD - valueFrom: - secretKeyRef: - name: wordpress-cloudsql-db-credentials - key: password - ports: - - containerPort: 80 - volumeMounts: - - name: wordpress-volume - mountPath: /var/www/html - readinessProbe: - httpGet: - path: / - port: 80 - initialDelaySeconds: 180 - periodSeconds: 10 - timeoutSeconds: 10 - failureThreshold: 10 - successThreshold: 1 - livenessProbe: - httpGet: - path: / - port: 80 - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 10 - failureThreshold: 20 - successThreshold: 1 - - name: cloudsql-proxy - resources: - limits: - cpu: "200m" - memory: "100Mi" - image: gcr.io/cloudsql-docker/gce-proxy:1.11 - env: - - name: CONNECTION_NAME - valueFrom: - secretKeyRef: - name: wordpress-cloudsql-db-credentials - key: connectionName - - name: PROJECT_ID - valueFrom: - secretKeyRef: - name: wordpress-cloudsql-db-credentials - 
key: projectId - command: ["/cloud_sql_proxy", - "-instances=$(PROJECT_ID):$(CONNECTION_NAME)=tcp:3306"] - volumeClaimTemplates: - - metadata: - name: wordpress-volume - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ required "storage is required!" .Values.wordpress.storage}} diff --git a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/values.yaml b/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/values.yaml deleted file mode 100644 index c5602add91d..00000000000 --- a/config-connector/solutions/apps/helm/wordpress/charts/wordpress-gcp/values.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Default values for opsman. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -google: - projectId: - region: us-central1 - -database: - instanceName: wp-db - dbName: wordpress - version: MYSQL_5_7 - tier: db-f1-micro - user: wordpress - password: change-me - -wordpress: - image: wordpress:5.2.2-apache - storage: 10Gi - resources: - limits: - cpu: "200m" - memory: "100Mi" \ No newline at end of file diff --git a/config-connector/solutions/iam/helm/folder-iam/Chart.yaml b/config-connector/solutions/iam/helm/folder-iam/Chart.yaml deleted file mode 100644 index 99c50129895..00000000000 --- a/config-connector/solutions/iam/helm/folder-iam/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -name: folder-iam -version: 0.1.0 -description: Configure permissions for a GCP folder diff --git a/config-connector/solutions/iam/helm/folder-iam/README.md b/config-connector/solutions/iam/helm/folder-iam/README.md deleted file mode 100644 index 71a0c2d65d6..00000000000 --- a/config-connector/solutions/iam/helm/folder-iam/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# Folder IAM - -================================================== - -## NAME - - folder-iam - -## SYNOPSIS - - Config Connector compatible YAML files to grant a specific member a role (default to `roles/resourcemanager.folderEditor`) to an existing folder. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the folder-iam folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/iam/helm/folder-iam - ``` - -## REQUIREMENTS - -1. GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). - -1. A working Config Connector cluster using the "cnrm-system" service account with _minimally_ the permissions given by the following role on the desired folder: - - `roles/resourcemanager.folderIamAdmin` - -1. Install [Helm](../../../README.md#helm) - -## USAGE - -All steps are running from the current directory ([config-connector/solutions/iam/helm/folder-iam](.)). - -1. Review and update the values in `./values.yaml`. - -1. Validate and install the sample with Helm. - - ```bash - # validate your chart - helm lint . --set iamPolicyMember.iamMember=user:name@example.com,folderID=VALUE - - # check the output of your chart - helm template . --set iamPolicyMember.iamMember=user:name@example.com,folderID=VALUE - - # do a dryrun on your chart and address issues if there are any - helm install . 
--dry-run --set iamPolicyMember.iamMember=user:name@example.com,folderID=VALUE --generate-name - - # install your chart - helm install . --set iamPolicyMember.iamMember=user:name@example.com,folderID=VALUE --generate-name - ``` - -1. _Optionaly_, you can also change the role granted to the member. (you can find all of the folder related IAM roles - [here](https://cloud.google.com/iam/docs/understanding-roles#resource-manager-roles)): - ```bash - # install your chart with a new IAM role. - helm install . --set iamPolicyMember.role=roles/iam.serviceAccountTokenCreator,iamPolicyMember.iamMember=user:name@example.com,folderID=VALUE --generate-name - ``` - -1. Check the created helm release to verify the installation: - ```bash - helm list - ``` - - Check the status of the IAM Policy Member: - ```bash - kubectl describe iampolicymember iampolicymember-folder-iam - ``` - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/helm/folder-iam/templates/iampolicymember.yaml b/config-connector/solutions/iam/helm/folder-iam/templates/iampolicymember.yaml deleted file mode 100644 index 9216158e6fc..00000000000 --- a/config-connector/solutions/iam/helm/folder-iam/templates/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: iampolicymember-folder-iam -spec: - member: {{ required "IAM member is required!" .Values.iamPolicyMember.iamMember }} - role: {{ .Values.iamPolicyMember.role }} - resourceRef: - apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 - kind: Folder - external: {{ required "folder ID is required!" .Values.folderID | quote}} diff --git a/config-connector/solutions/iam/helm/folder-iam/values.yaml b/config-connector/solutions/iam/helm/folder-iam/values.yaml deleted file mode 100644 index 1f841dcdbcc..00000000000 --- a/config-connector/solutions/iam/helm/folder-iam/values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -iamPolicyMember: - iamMember: - role: roles/resourcemanager.folderEditor - -folderID: diff --git a/config-connector/solutions/iam/helm/kms-key-ring/Chart.yaml b/config-connector/solutions/iam/helm/kms-key-ring/Chart.yaml deleted file mode 100644 index a5145b3bd99..00000000000 --- a/config-connector/solutions/iam/helm/kms-key-ring/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -name: kms-key-ring -version: 0.1.0 -description: Grant an IAM role to a member for creating a kms key ring diff --git a/config-connector/solutions/iam/helm/kms-key-ring/README.md b/config-connector/solutions/iam/helm/kms-key-ring/README.md deleted file mode 100644 index d9f4df7e50d..00000000000 --- a/config-connector/solutions/iam/helm/kms-key-ring/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# KMS Key Ring - -================================================== - -## NAME - - kms-key-ring - -## SYNOPSIS - - Config Connector compatible YAML files to create a KMS key ring in your desired project, and grant a specific member a role (default to roles/cloudkms.admin) for accessing the KMS key ring that just created - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the service account folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/iam/helm/kms-key-ring - ``` - -## REQUIREMENTS - -1. GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). -1. [Helm](../../../README.md#helm) -1. The "cnrm-system" service account assigned with either `roles/cloudkms.admin` or `roles/owner` in the project managed by Config Connector -1. Cloud Key Management Service (KMS) API enabled in the project where Config Connector is installed -1. 
Cloud Key Management Service (KMS) API enabled in the project managed by Config Connector if it is a different project - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/iam/helm/kms-key-ring](.)). - -1. Review and update the values in `./values.yaml`. - -1. Validate and install the sample with Helm. - - ```bash - # validate your chart - helm lint . --set iamPolicyMember.iamMember=user:name@example.com - - # check the output of your chart - helm template . --set iamPolicyMember.iamMember=user:name@example.com - - # do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set iamPolicyMember.iamMember=user:name@example.com --generate-name - - # install your chart - helm install . --set iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - -1. _Optionaly_, you can set the name of the KMS keyring (defaults to `allowed-ring`), set the location of the ring (defaults to `us-central1`) and the role to grant (defaults to `roles/pubsub.editor`, full list of roles [here](https://cloud.google.com/iam/docs/understanding-roles#cloud-kms-roles)) by explictly setting them when installing the solution: - - ```bash - # install your chart with a difirent name of the KMS keyring and location - helm install . --set KMSKeyRing.name=your-ring-name,KMSKeyRing.location=us-west1,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or, - ```bash - # install your chart with a new role - helm install . --set iamPolicyMember.role=roles/cloudkms.importer,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or set there in one command. - -1. 
Check the created helm release to verify the installation: - ```bash - helm list - ``` - Check the status of the KMS keyring resource by running: - - Note: By default value of KMS keyring name is ```allowed-ring``` - - ```bash - kubectl describe kmskeyring [KMS Keyring name] - ``` - Check the status of the IAM Policy Member: - ```bash - kubectl describe iampolicymember ring-iam-member - ``` - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/helm/kms-key-ring/templates/iampolicymember.yaml b/config-connector/solutions/iam/helm/kms-key-ring/templates/iampolicymember.yaml deleted file mode 100644 index dea15ce9753..00000000000 --- a/config-connector/solutions/iam/helm/kms-key-ring/templates/iampolicymember.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: ring-iam-member -spec: - member: {{ required "IAM member is required!" 
.Values.iamPolicyMember.iamMember }} - role: {{ .Values.iamPolicyMember.role }} - resourceRef: - apiVersion: kms.cnrm.cloud.google.com/v1beta1 - kind: KMSKeyRing - name: {{ .Values.KMSKeyRing.name }} diff --git a/config-connector/solutions/iam/helm/kms-key-ring/templates/kmskeyring.yaml b/config-connector/solutions/iam/helm/kms-key-ring/templates/kmskeyring.yaml deleted file mode 100644 index ef4102a7929..00000000000 --- a/config-connector/solutions/iam/helm/kms-key-ring/templates/kmskeyring.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: kms.cnrm.cloud.google.com/v1beta1 -kind: KMSKeyRing -metadata: - name: {{ .Values.KMSKeyRing.name }} -spec: - location: {{ .Values.KMSKeyRing.location }} diff --git a/config-connector/solutions/iam/helm/kms-key-ring/values.yaml b/config-connector/solutions/iam/helm/kms-key-ring/values.yaml deleted file mode 100644 index 5e692819388..00000000000 --- a/config-connector/solutions/iam/helm/kms-key-ring/values.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -iamPolicyMember: - iamMember: - role: roles/cloudkms.admin - -KMSKeyRing: - name: allowed-ring - location: us-central1 diff --git a/config-connector/solutions/iam/helm/member-iam/Chart.yaml b/config-connector/solutions/iam/helm/member-iam/Chart.yaml deleted file mode 100644 index 66863d80a9a..00000000000 --- a/config-connector/solutions/iam/helm/member-iam/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -name: member-iam -version: 0.1.0 -description: A Helm chart to create a service account in your desired project, and grant it a specific role. 
diff --git a/config-connector/solutions/iam/helm/member-iam/README.md b/config-connector/solutions/iam/helm/member-iam/README.md deleted file mode 100644 index a4415845645..00000000000 --- a/config-connector/solutions/iam/helm/member-iam/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# Member IAM - -================================================== - -## NAME - - member-iam - -## SYNOPSIS - - Config Connector compatible YAML files to create a service account in your desired project, and grant it a specific role (defaults to `compute.networkAdmin`) in the project. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the member-iam folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/iam/helm/member-iam - ``` - -## REQUIREMENTS - -1. GKE Cluster with [Config Connector installed using a GKE Workload Identity](https://cloud.google.com/config-connector/docs/how-to/install-upgrade-uninstall#workload-identity). -1. [Helm](../../../README.md#helm) -1. The "cnrm-system" service account assigned with `roles/resourcemanager.projectIamAdmin` and `roles/iam.serviceAccountAdmin` or `roles/owner` - role in your desired project (it doesn't need to be the project managed by Config Connector) -1. Cloud Resource Manager API enabled in the project where Config Connector is installed - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/iam/helm/member-iam](.)). - -1. Review and update the values in `./values.yaml`. - -1. Create a namespace. If you want to use your existing namespace skip this step and use your own namespace name instead of `member-iam-solution` in all other steps. - - ```bash - kubectl create namespace member-iam-solution - ``` - -1. Validate and install the sample with Helm. `PROJECT_ID` should be your desired project ID unique with in GCP. 
- - ```bash - # validate your chart - helm lint . --set projectID=PROJECT_ID --namespace member-iam-solution - - # check the output of your chart - helm template . --set projectID=PROJECT_ID --namespace member-iam-solution - - # do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set projectID=PROJECT_ID --namespace member-iam-solution --generate-name - - # install your chart - helm install . --set projectID=PROJECT_ID --namespace member-iam-solution --generate-name - ``` - -1. _Optionally_, you can also change the service account name `iamPolicyMember.iamMember` (defaults to `member-iam-test`) and role `iamPolicyMember.role` (defaults to `roles/compute.networkAdmin`) - (you can find all the predefined GCP IAM roles [here](https://cloud.google.com/iam/docs/understanding-roles#predefined_roles)): - - ```bash - # install your chart with a diffirent service account name - helm install . --set projectID=PROJECT_ID,iamPolicyMember.iamMember=service-account-name --namespace member-iam-solution --generate-name - ``` - Or, - ```bash - # install your chart with a diffirent role - helm install . --set projectID=PROJECT_ID,iamPolicyMember.role=roles/compute.networkUser --namespace member-iam-solution --generate-name - ``` - Or set both in one command. - -1. Check the created helm release to verify the installation: - - ```bash - helm list - ``` - - Check the status of the IAM Service Account: - - ```bash - kubectl describe iamserviceaccount [Value of iamPolicyMember.iamMember] - ``` - - Check the status of the IAM Policy Member: - - ```bash - kubectl describe iampolicymember project-iam-member - ``` - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/iam/helm/member-iam/templates/iampolicymember.yaml b/config-connector/solutions/iam/helm/member-iam/templates/iampolicymember.yaml deleted file mode 100644 index a1aa749d586..00000000000 --- a/config-connector/solutions/iam/helm/member-iam/templates/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: project-iam-member -spec: - member: "serviceAccount:{{ .Values.iamPolicyMember.iamMember }}@{{ required "Desired Project ID is required" .Values.projectID }}.iam.gserviceaccount.com" - role: {{ .Values.iamPolicyMember.role }} - resourceRef: - apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 - kind: Project - external: "projects/{{ required "Desired Project ID is required" .Values.projectID }}" diff --git a/config-connector/solutions/iam/helm/member-iam/templates/iamserviceaccount.yaml b/config-connector/solutions/iam/helm/member-iam/templates/iamserviceaccount.yaml deleted file mode 100644 index b144e652392..00000000000 --- a/config-connector/solutions/iam/helm/member-iam/templates/iamserviceaccount.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMServiceAccount -metadata: - name: {{ .Values.iamPolicyMember.iamMember }} diff --git a/config-connector/solutions/iam/helm/member-iam/values.yaml b/config-connector/solutions/iam/helm/member-iam/values.yaml deleted file mode 100644 index 3696e2d1e94..00000000000 --- a/config-connector/solutions/iam/helm/member-iam/values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -projectID: - -iamPolicyMember: - iamMember: member-iam-test - role: roles/compute.networkAdmin diff --git a/config-connector/solutions/iam/helm/project-iam/Chart.yaml b/config-connector/solutions/iam/helm/project-iam/Chart.yaml deleted file mode 100644 index 16eefafb8ec..00000000000 --- a/config-connector/solutions/iam/helm/project-iam/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -name: project-iam -version: 0.1.0 -description: Grant a role for a member in a desired project. diff --git a/config-connector/solutions/iam/helm/project-iam/README.md b/config-connector/solutions/iam/helm/project-iam/README.md deleted file mode 100644 index 934ab0680fd..00000000000 --- a/config-connector/solutions/iam/helm/project-iam/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# Project IAM - -================================================== - -## NAME - - project-iam - -## SYNOPSIS - - Config Connector compatible YAML files to grant a role for a member in a desired project. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the project-iam folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/iam/helm/project-iam - ``` - -## REQUIREMENTS - -1. GKE Cluster with [Config Connector installed using a GKE Workload Identity](https://cloud.google.com/config-connector/docs/how-to/install-upgrade-uninstall#workload-identity). -1. [Helm](../../../README.md#helm) -1. The "cnrm-system" service account that has the `roles/resourcemanager.projectIamAdmin` - role in your desired project (it doesn't need to be the project managed by Config Connector). -1. The project managed by Config Connector has Cloud Resource Manager API enabled. - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/iam/helm/project-iam](.)). - -1. 
Review and update the values in `./values.yaml`. - -1. Validate and install the sample with Helm. `PROJECT_ID` should be your desired project ID unique with in GCP. - - ```bash - # validate your chart - helm lint . --set iamPolicyMember.iamMember=user:name@example.com,projectID=PROJECT_ID - - # check the output of your chart - helm template . --set iamPolicyMember.iamMember=user:name@example.com,projectID=PROJECT_ID - - # do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set iamPolicyMember.iamMember=user:name@example.com,projectID=PROJECT_ID --generate-name - - # install your chart - helm install . --set iamPolicyMember.iamMember=user:name@example.com,projectID=PROJECT_ID --generate-name - ``` - -1. _Optionaly_, you can also change the role (defaults to `roles/logging.viewer`): - - ```bash - # install your chart with a diffirent role - helm install . --set iamPolicyMember.iamMember=user:name@example.com,iamPolicyMember.role=roles/logging.admin,projectID=PROJECT_ID --generate-name - ``` - -1. Check the created helm release to verify the installation: - - ```bash - helm list - ``` - - Check the status of the IAM Policy Member: - - ```bash - kubectl describe iampolicymember project-iam-member - ``` - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/iam/helm/project-iam/templates/iampolicymember.yaml b/config-connector/solutions/iam/helm/project-iam/templates/iampolicymember.yaml deleted file mode 100644 index 41f77eb1c8f..00000000000 --- a/config-connector/solutions/iam/helm/project-iam/templates/iampolicymember.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: project-iam-member -spec: - member: {{ required "IAM member is required!" .Values.iamPolicyMember.iamMember }} - role: {{ .Values.iamPolicyMember.role }} - resourceRef: - apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 - kind: Project - external: "projects/{{ required "Desired Project ID is required" .Values.projectID }}" diff --git a/config-connector/solutions/iam/helm/project-iam/values.yaml b/config-connector/solutions/iam/helm/project-iam/values.yaml deleted file mode 100644 index 07e1dc902ac..00000000000 --- a/config-connector/solutions/iam/helm/project-iam/values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -projectID: - -iamPolicyMember: - iamMember: - role: roles/logging.viewer diff --git a/config-connector/solutions/iam/helm/pubsub-subscription/Chart.yaml b/config-connector/solutions/iam/helm/pubsub-subscription/Chart.yaml deleted file mode 100644 index 9ac3ad89e8f..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-subscription/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -name: pubsub-subscription -version: 0.1.0 -description: Creates a pubsub subscription and configures permissions for it diff --git a/config-connector/solutions/iam/helm/pubsub-subscription/README.md b/config-connector/solutions/iam/helm/pubsub-subscription/README.md deleted file mode 100644 index 84289448963..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-subscription/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# Pub/Sub Subscription - -================================================== - -## NAME - - pubsub-subscription - -## SYNOPSIS - - Config Connector compatible YAML files to create a Pub/Sub subscription and Pub/Sub topic if it doesn't exist in your desired project and grant a specific member a role (default to roles/pubsub.viewer) for accessing the Pub/Sub subscription that just created - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the service account folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/iam/helm/pubsub-subscription - ``` - -## REQUIREMENTS - -1. GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). -1. [Helm](../../../README.md#helm) -1. The "cnrm-system" service account assigned with either `roles/pubsub.viewer` or `roles/owner` in the project managed by Config Connector -1. Cloud Pub/Sub API enabled in the project where Config Connector is installed -1. Cloud Pub/Sub API enabled in the project managed by Config Connector if it is a different project - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/iam/helm/pubsub-subscription](.)). - -1. Review and update the values in `./values.yaml`. - -1. Validate and install the sample with Helm. 
- - ```bash - # validate your chart - helm lint . --set iamPolicyMember.iamMember=user:name@example.com - - # check the output of your chart - helm template . --set iamPolicyMember.iamMember=user:name@example.com - - # do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set iamPolicyMember.iamMember=user:name@example.com --generate-name - - # install your chart - helm install . --set iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - -1. _Optionaly_, you can set the name of the pubsub subscription (defaults to `allowed-subscription`), set the name of the PubSub topic (defaults to `allowed-topic`) and the role to grant (defaults to `roles/pubsub.viewer`, full list of roles [here](https://cloud.google.com/iam/docs/understanding-roles#pub-sub-roles)) by explictly setting them when installing the solution: - - ```bash - # install your chart with a difirent pubsub subscription and pubsub topic - helm install . --set PubSubTopic.name=your-topic-name,PubSubSubscription.name=your-subscription-name,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or, - ```bash - # install your chart with a new role - helm install . --set iamPolicyMember.role=roles/pubsub.editor,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or set there in one command. - -1. Check the created helm release to verify the installation: - ```bash - helm list - ``` - Check the status of the pubsub subscription and topic resource by running: - - Note: By default value of pubsub topic name is ```allowed-topic``` and pubsub subscription name is ```allowed-subscription``` - - ```bash - kubectl describe pubsubsubscriptions [pubsub subscription name] - - kubectl describe pubsubtopic [pubsub topic name] - ``` - Check the status of the IAM Policy Member: - ```bash - kubectl describe iampolicymember topic-iam-member - ``` - -1. 
Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/helm/pubsub-subscription/templates/iampolicymember.yaml b/config-connector/solutions/iam/helm/pubsub-subscription/templates/iampolicymember.yaml deleted file mode 100644 index e4a38317d4b..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-subscription/templates/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: topic-iam-member -spec: - member: {{ required "IAM member is required!" 
.Values.iamPolicyMember.iamMember }} - role: {{ .Values.iamPolicyMember.role }} - resourceRef: - apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 - kind: PubSubSubscription - name: {{ .Values.PubSubSubscription.name }} diff --git a/config-connector/solutions/iam/helm/pubsub-subscription/templates/pubsubsubscription.yaml b/config-connector/solutions/iam/helm/pubsub-subscription/templates/pubsubsubscription.yaml deleted file mode 100644 index db55a63bfe2..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-subscription/templates/pubsubsubscription.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 -kind: PubSubSubscription -metadata: - name: {{ .Values.PubSubSubscription.name }} -spec: - ackDeadlineSeconds: 15 - messageRetentionDuration: 86400s - retainAckedMessages: false - topicRef: - name: {{ .Values.PubSubTopic.name }} diff --git a/config-connector/solutions/iam/helm/pubsub-subscription/templates/pubsubtopic.yaml b/config-connector/solutions/iam/helm/pubsub-subscription/templates/pubsubtopic.yaml deleted file mode 100644 index 1fef88b5fcb..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-subscription/templates/pubsubtopic.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 -kind: PubSubTopic -metadata: - name: {{ .Values.PubSubTopic.name }} diff --git a/config-connector/solutions/iam/helm/pubsub-subscription/values.yaml b/config-connector/solutions/iam/helm/pubsub-subscription/values.yaml deleted file mode 100644 index ae8c4066400..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-subscription/values.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -iamPolicyMember: - iamMember: - role: roles/pubsub.viewer - -PubSubTopic: - name: allowed-topic - -PubSubSubscription: - name: allowed-subscription diff --git a/config-connector/solutions/iam/helm/pubsub-topic/Chart.yaml b/config-connector/solutions/iam/helm/pubsub-topic/Chart.yaml deleted file mode 100644 index b33997f49e9..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-topic/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -name: pubsub-topic -version: 0.1.0 -description: Grant an IAM role to a member for a PubSub topic diff --git a/config-connector/solutions/iam/helm/pubsub-topic/README.md b/config-connector/solutions/iam/helm/pubsub-topic/README.md deleted file mode 100644 index 75ad9457272..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-topic/README.md +++ /dev/null @@ -1,102 +0,0 @@ -# Pub/Sub Topic - -================================================== - -## NAME - - pubsub-topic - -## SYNOPSIS - - - Config Connector compatible YAML files to create a Pub/Sub topic in your desired project, and grant a specific member a role (default to roles/pubsub.editor) for accessing the Pub/Sub topic that just created - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. 
Go to the service account folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/iam/helm/pubsub-topic - ``` - -## REQUIREMENTS - -1. GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). -1. [Helm](../../../README.md#helm) -1. The "cnrm-system" service account assigned with either `roles/pubsub.admin` or `roles/owner` in the - project managed by Config Connector -1. Cloud Pub/Sub API enabled in the project where Config Connector is - installed -1. Cloud Pub/Sub API enabled in the project managed by Config Connector if it - is a different project - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/iam/helm/pubsub-topic](.)). - -1. Review and update the values in `./values.yaml`. - -1. Validate and install the sample with Helm. - - ```bash - # validate your chart - helm lint . --set iamPolicyMember.iamMember=user:name@example.com - - # check the output of your chart - helm template . --set iamPolicyMember.iamMember=user:name@example.com - - # do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set iamPolicyMember.iamMember=user:name@example.com --generate-name - - # install your chart - helm install . --set iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - -1. _Optionaly_, you can set the name of the PubSub topic (defaults to `allowed-topic`) and the role to grant (defaults to `roles/pubsub.editor`, full list of roles [here](https://cloud.google.com/iam/docs/understanding-roles#pub-sub-roles)) by explictly setting them when installing the solution: - - ```bash - # install your chart with a difirent name of the PubSub topic - helm install . --set PubSubTopic.name=your-topic,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or, - ```bash - # install your chart with a new role - helm install . 
--set iamPolicyMember.role=roles/pubsub.viewer,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or set them both in one command. - -1. Check the created helm release to verify the installation: - ```bash - helm list - ``` - Check the status of the pub/sub topic resource by running: - - Note: By default value of Pub/Sub topic name is ```allowed-topic``` - - ```bash - kubectl describe pubsubtopic [topic name] - ``` - Check the status of the IAM Policy Member: - ```bash - kubectl describe iampolicymember topic-iam-member - ``` - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/helm/pubsub-topic/templates/iampolicymember.yaml b/config-connector/solutions/iam/helm/pubsub-topic/templates/iampolicymember.yaml deleted file mode 100644 index 2cb33b02a4e..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-topic/templates/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: topic-iam-member -spec: - member: {{ required "IAM member is required!" 
.Values.iamPolicyMember.iamMember }} - role: {{ .Values.iamPolicyMember.role }} - resourceRef: - apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 - kind: PubSubTopic - name: {{ .Values.PubSubTopic.name }} diff --git a/config-connector/solutions/iam/helm/pubsub-topic/templates/pubsubtopic.yaml b/config-connector/solutions/iam/helm/pubsub-topic/templates/pubsubtopic.yaml deleted file mode 100644 index 1fef88b5fcb..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-topic/templates/pubsubtopic.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 -kind: PubSubTopic -metadata: - name: {{ .Values.PubSubTopic.name }} diff --git a/config-connector/solutions/iam/helm/pubsub-topic/values.yaml b/config-connector/solutions/iam/helm/pubsub-topic/values.yaml deleted file mode 100644 index 8264d1aaa98..00000000000 --- a/config-connector/solutions/iam/helm/pubsub-topic/values.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -iamPolicyMember: - iamMember: - role: roles/pubsub.editor - -PubSubTopic: - name: allowed-topic diff --git a/config-connector/solutions/iam/helm/service-account/Chart.yaml b/config-connector/solutions/iam/helm/service-account/Chart.yaml deleted file mode 100644 index e346c3e387f..00000000000 --- a/config-connector/solutions/iam/helm/service-account/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -name: service-account -version: 0.1.0 -description: Grant an IAM role to a member for accessing a given service account diff --git a/config-connector/solutions/iam/helm/service-account/README.md b/config-connector/solutions/iam/helm/service-account/README.md deleted file mode 100644 index 3aea394cc67..00000000000 --- a/config-connector/solutions/iam/helm/service-account/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# Service Account - -================================================== - -## NAME - - service account - -## SYNOPSIS - - Config Connector compatible YAML files to create a service account in your desired project, and grant a specific member a role (default to `roles/iam.serviceAccountKeyAdmin`) for accessing the service account that just created. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the service account folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/iam/helm/service-account - ``` - -## REQUIREMENTS - -1. GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). -1. [Helm](../../../README.md#helm) - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/iam/helm/service-account](.)). - -1. Review and update the values in `./values.yaml`. - -1. Validate and install the sample with Helm. - - ```bash - # validate your chart - helm lint . --set iamPolicyMember.iamMember=user:name@example.com - - # check the output of your chart - helm template . --set iamPolicyMember.iamMember=user:name@example.com - - # Do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set iamPolicyMember.iamMember=user:name@example.com --generate-name - - # install your chart - helm install . 
--set iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - -1. _Optionaly_, you can customize optional values by explictly setting them when installing the solution: - ```bash - # install your chart with a new service account name - helm install . --set serviceAccount.name=new-service-account,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or, - ```bash - # install your chart with a new role - helm install . --set iamPolicyMember.role=roles/iam.serviceAccountTokenCreator,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or set them both in one command. - -1. Check the created helm release to verify the installation: - ```bash - helm list - ``` - Check the status of the service account resource by running: - ```bash - kubectl describe iamserviceaccount [service account name] - ``` - Check the status of the IAM Policy Member: - ```bash - kubectl describe iampolicymember iampolicymember-service-account - ``` - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/helm/service-account/templates/iampolicymember.yaml b/config-connector/solutions/iam/helm/service-account/templates/iampolicymember.yaml deleted file mode 100644 index 7c49bf37770..00000000000 --- a/config-connector/solutions/iam/helm/service-account/templates/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: iampolicymember-service-account -spec: - member: {{ required "IAM member is required!" .Values.iamPolicyMember.iamMember }} - role: {{ .Values.iamPolicyMember.role }} - resourceRef: - apiVersion: iam.cnrm.cloud.google.com/v1beta1 - kind: IAMServiceAccount - name: {{ .Values.serviceAccount.name }} diff --git a/config-connector/solutions/iam/helm/service-account/templates/iamserviceaccount.yaml b/config-connector/solutions/iam/helm/service-account/templates/iamserviceaccount.yaml deleted file mode 100644 index 31f02c29105..00000000000 --- a/config-connector/solutions/iam/helm/service-account/templates/iamserviceaccount.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMServiceAccount -metadata: - name: {{ .Values.serviceAccount.name }} diff --git a/config-connector/solutions/iam/helm/service-account/values.yaml b/config-connector/solutions/iam/helm/service-account/values.yaml deleted file mode 100644 index a874b2cf2cd..00000000000 --- a/config-connector/solutions/iam/helm/service-account/values.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -iamPolicyMember: - iamMember: - role: roles/iam.serviceAccountKeyAdmin - -serviceAccount: - name: service-account-solution diff --git a/config-connector/solutions/iam/helm/storage-bucket-iam/Chart.yaml b/config-connector/solutions/iam/helm/storage-bucket-iam/Chart.yaml deleted file mode 100644 index 12709a6385d..00000000000 --- a/config-connector/solutions/iam/helm/storage-bucket-iam/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -name: storage-bucket-iam -version: 0.1.0 -description: Grant an IAM role to a member for a Storage Bucket diff --git a/config-connector/solutions/iam/helm/storage-bucket-iam/README.md b/config-connector/solutions/iam/helm/storage-bucket-iam/README.md deleted file mode 100644 index ea701ece323..00000000000 --- a/config-connector/solutions/iam/helm/storage-bucket-iam/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# Storage Bucket IAM - -================================================== - -## NAME - - storage bucket iam - -## SYNOPSIS - - Config Connector compatible yaml to enable permissions for a storage bucket. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the service account folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/iam/helm/storage-bucket-iam - ``` - -## REQUIREMENTS - -1. GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). -1. [Helm](../../../README.md#helm) -1. A working Config Connector instance. -1. A storage bucket managed by [IAM](https://cloud.google.com/storage/docs/access-control#using_permissions_with_acls). -1. The "cnrm-system" service account with `roles/storage.admin` in either - the storage bucket or the project which owns the storage bucket. - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/iam/helm/storage-bucket-iam](.)). - -1. Review and update the values in `./values.yaml`. - -1. Validate and install the sample with Helm. - - ```bash - # validate your chart - helm lint . 
--set iamPolicyMember.iamMember=user:name@example.com,StorageBucket.name=your-bucket - - # check the output of your chart - helm template . --set iamPolicyMember.iamMember=user:name@example.com,StorageBucket.name=your-bucket - - # Do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set iamPolicyMember.iamMember=user:name@example.com,StorageBucket.name=your-bucket --generate-name - - # install your chart - helm install . --set iamPolicyMember.iamMember=user:name@example.com,StorageBucket.name=your-bucket --generate-name - ``` - -1. _Optionaly_, you can customize optional value role of iam policy member (defaults to `roles/storage.objectViewer`, full list of roles [here](https://cloud.google.com/iam/docs/understanding-roles#storage-roles)): - ```bash - # install your chart with a new role - helm install . --set iamPolicyMember.iamMember=user:name@example.com,StorageBucket.name=your-bucket,iamPolicyMember.role=roles/storage.admin --generate-name - ``` - -1. Check the created helm release to verify the installation: - ```bash - helm list - ``` - Check the status of the IAM Policy Member: - ```bash - kubectl describe iampolicymember storage-bucket-iam-member - ``` - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/iam/helm/storage-bucket-iam/templates/iampolicymember.yaml b/config-connector/solutions/iam/helm/storage-bucket-iam/templates/iampolicymember.yaml deleted file mode 100644 index 4a85b1fffbe..00000000000 --- a/config-connector/solutions/iam/helm/storage-bucket-iam/templates/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: storage-bucket-iam-member -spec: - member: {{ required "IAM member is required!" .Values.iamPolicyMember.iamMember }} - role: {{ .Values.iamPolicyMember.role }} - resourceRef: - apiVersion: storage.cnrm.cloud.google.com/v1beta1 - kind: StorageBucket - external: {{ required "Existing Storage Bucket name is required" .Values.StorageBucket.name }} diff --git a/config-connector/solutions/iam/helm/storage-bucket-iam/values.yaml b/config-connector/solutions/iam/helm/storage-bucket-iam/values.yaml deleted file mode 100644 index 6764c9abbed..00000000000 --- a/config-connector/solutions/iam/helm/storage-bucket-iam/values.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -iamPolicyMember: - iamMember: - role: roles/storage.objectViewer - -StorageBucket: - name: diff --git a/config-connector/solutions/iam/helm/subnet/Chart.yaml b/config-connector/solutions/iam/helm/subnet/Chart.yaml deleted file mode 100644 index 21377ad4ab5..00000000000 --- a/config-connector/solutions/iam/helm/subnet/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -name: subnet -version: 0.1.0 -description: Create a subnet and grant a specific member a role for the subnet diff --git a/config-connector/solutions/iam/helm/subnet/README.md b/config-connector/solutions/iam/helm/subnet/README.md deleted file mode 100644 index 4730b32d586..00000000000 --- a/config-connector/solutions/iam/helm/subnet/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# Subnet - -================================================== - -## NAME - - subnet - -## SYNOPSIS - - Config Connector compatible YAML files to create a subnet in your desired project, and grant a specific member a role (default to `roles/compute.networkUser`) for accessing the subnet that just created. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the subnet folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/iam/helm/subnet - ``` - -## REQUIREMENTS - -1. GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). -1. [Helm](../../../README.md#helm) - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/iam/helm/subnet](.)). - -1. Review and update the values in `./values.yaml`. - -1. Validate and install the sample with Helm. - - ```bash - # validate your chart - helm lint . --set iamPolicyMember.iamMember=user:name@example.com - - # check the output of your chart - helm template . --set iamPolicyMember.iamMember=user:name@example.com - - # do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set iamPolicyMember.iamMember=user:name@example.com --generate-name - - # install your chart - helm install . --set iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - -1. 
_Optionaly_, you can customize optional values by explictly setting them when installing the solution: - ```bash - # install your chart with a new subnet name - helm install . --set subnet.name=new-subnet,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or, - ```bash - # install your chart with a IAM role - helm install . --set iamPolicyMember.role=roles/compute.networkViewer,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or, - ```bash - # install your chart with another compute network - helm install . --set computeNetwork.name=VALUE,iamPolicyMember.iamMember=user:name@example.com --generate-name - ``` - Or set any of them at the same time in one command. - -1. Check the created helm release to verify the installation: - ```bash - helm list - ``` - Check the status of the subnet resource by running: - ```bash - kubectl describe ComputeSubnetwork [subnet name] - ``` - Check the status of the IAM Policy Member: - ```bash - kubectl describe iampolicymember iampolicymember-subnet - ``` - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/helm/subnet/templates/computernetwork.yaml b/config-connector/solutions/iam/helm/subnet/templates/computernetwork.yaml deleted file mode 100644 index 2a76a0b2646..00000000000 --- a/config-connector/solutions/iam/helm/subnet/templates/computernetwork.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeNetwork -metadata: - name: {{ .Values.computeNetwork.name }} -spec: - routingMode: REGIONAL - autoCreateSubnetworks: false diff --git a/config-connector/solutions/iam/helm/subnet/templates/iampolicymember.yaml b/config-connector/solutions/iam/helm/subnet/templates/iampolicymember.yaml deleted file mode 100644 index 99acab2bbf3..00000000000 --- a/config-connector/solutions/iam/helm/subnet/templates/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: iampolicymember-subnet -spec: - member: {{ required "IAM member is required!" 
.Values.iamPolicyMember.iamMember }} - role: {{ .Values.iamPolicyMember.role }} - resourceRef: - apiVersion: compute.cnrm.cloud.google.com/v1beta1 - kind: ComputeSubnetwork - name: {{ .Values.subnet.name }} diff --git a/config-connector/solutions/iam/helm/subnet/templates/subnet.yaml b/config-connector/solutions/iam/helm/subnet/templates/subnet.yaml deleted file mode 100644 index c9a34193d7c..00000000000 --- a/config-connector/solutions/iam/helm/subnet/templates/subnet.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeSubnetwork -metadata: - name: {{ .Values.subnet.name }} -spec: - ipCidrRange: 10.0.0.0/9 - region: {{ .Values.subnet.region }} - privateIpGoogleAccess: false - networkRef: - name: {{ .Values.computeNetwork.name }} - logConfig: - aggregationInterval: INTERVAL_10_MIN - flowSampling: 0.5 - metadata: INCLUDE_ALL_METADATA diff --git a/config-connector/solutions/iam/helm/subnet/values.yaml b/config-connector/solutions/iam/helm/subnet/values.yaml deleted file mode 100644 index f4d8329d482..00000000000 --- a/config-connector/solutions/iam/helm/subnet/values.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -iamPolicyMember: - iamMember: - role: roles/compute.networkUser - -subnet: - name: subnet-solution - region: us-central1 - -computeNetwork: - name: compute-network-example diff --git a/config-connector/solutions/iam/kpt/folder-iam/Kptfile b/config-connector/solutions/iam/kpt/folder-iam/Kptfile deleted file mode 100644 index 8fcd88ac037..00000000000 --- a/config-connector/solutions/iam/kpt/folder-iam/Kptfile +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: folder-iam -packageMetadata: - shortDescription: Configure permissions for a GCP folder -openAPI: - definitions: - io.k8s.cli.setters.folder-id: - x-k8s-cli: - setter: - name: folder-id - value: "${FOLDER_ID?}" - setBy: PLACEHOLDER - description: the numaric id of the folder - io.k8s.cli.setters.iam-member: - x-k8s-cli: - setter: - name: iam-member - value: ${IAM_MEMBER?} - setBy: PLACEHOLDER - description: GCP identity to grant role to - io.k8s.cli.setters.role: - x-k8s-cli: - setter: - name: role - value: roles/resourcemanager.folderEditor - setBy: package-default - description: the IAM role to grant to the member diff --git a/config-connector/solutions/iam/kpt/folder-iam/README.md b/config-connector/solutions/iam/kpt/folder-iam/README.md deleted file mode 100644 index b1b8ef354da..00000000000 --- a/config-connector/solutions/iam/kpt/folder-iam/README.md +++ /dev/null @@ -1,55 +0,0 @@ -Folder IAM -================================================== - -# NAME - - folder-iam - -# SYNOPSIS - - Config Connector compatible YAML files to grant a specific member a 
role (default to `roles/resourcemanager.folderEditor`) to an existing folder. - -# CONSUMPTION - - Download the package using [kpt](https://googlecontainertools.github.io/kpt/): - - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/iam/kpt/folder-iam folder-iam - ``` - -# REQUIREMENTS - - - A working Config Connector instance using the "cnrm-system" service - account with the following role in the desired folder: - - `roles/resourcemanager.folderIamAdmin` - - Cloud Resource Manager API enabled in the project where Config Connector - is installed - -# USAGE - - Replace the `${FOLDER_ID?}` with a folder ID you want to add member to: - ``` - kpt cfg set . folder-id VALUE - ``` - - Replace the `${IAM_MEMBER?}` with a GCP identity to grant role to: - ``` - kpt cfg set . iam-member VALUE - ``` - - _Optionally_, you can also change the role granted to the member. (you can find all of the folder related IAM roles - [here](https://cloud.google.com/iam/docs/understanding-roles#resource-manager-roles)): - - ``` - kpt cfg set . role roles/resourcemanager.folderViewer - ``` - - Apply the YAMLs: - - ``` - kubectl apply -f . - ``` - -# LICENSE - - Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/kpt/folder-iam/iampolicymember.yaml b/config-connector/solutions/iam/kpt/folder-iam/iampolicymember.yaml deleted file mode 100644 index d0f3b4966b3..00000000000 --- a/config-connector/solutions/iam/kpt/folder-iam/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: iampolicymember-folder-iam -spec: - member: ${IAM_MEMBER?} # {"$ref":"#/definitions/io.k8s.cli.setters.iam-member"} - role: roles/resourcemanager.folderEditor # {"$ref":"#/definitions/io.k8s.cli.setters.role"} - resourceRef: - apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 - kind: Folder - external: "${FOLDER_ID?}" # {"$ref":"#/definitions/io.k8s.cli.setters.folder-id"} diff --git a/config-connector/solutions/iam/kpt/kms-crypto-key/Kptfile b/config-connector/solutions/iam/kpt/kms-crypto-key/Kptfile deleted file mode 100644 index 248125174a6..00000000000 --- a/config-connector/solutions/iam/kpt/kms-crypto-key/Kptfile +++ /dev/null @@ -1,44 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: kms-crypto-key -packageMetadata: - shortDescription: create a kms key ring, a kms crypto key, and apply an IAM role - to the crypto key -openAPI: - definitions: - io.k8s.cli.setters.iam-member: - description: IAM member to grant role - x-k8s-cli: - setter: - name: iam-member - value: ${IAM_MEMBER?} - setBy: PLACEHOLDER - io.k8s.cli.setters.role: - description: IAM role to grant - x-k8s-cli: - setter: - name: role - value: roles/cloudkms.signer - setBy: package-default - io.k8s.cli.setters.key-name: - description: name of key - x-k8s-cli: - setter: - name: key-name - value: allowed-key - setBy: package-default - io.k8s.cli.setters.ring-name: - description: name of ring - x-k8s-cli: - setter: - name: ring-name - value: allowed-ring - setBy: 
package-default - io.k8s.cli.setters.location: - description: location of ring - x-k8s-cli: - setter: - name: location - value: us-central1 - setBy: package-default diff --git a/config-connector/solutions/iam/kpt/kms-crypto-key/README.md b/config-connector/solutions/iam/kpt/kms-crypto-key/README.md deleted file mode 100644 index 8eec8695381..00000000000 --- a/config-connector/solutions/iam/kpt/kms-crypto-key/README.md +++ /dev/null @@ -1,52 +0,0 @@ -KMS Crypto Key -================================================== -# NAME - kms-crypto-key -# SYNOPSIS - Config Connector compatible yaml files to create a kms key ring, a kms crypto key, - and apply an IAM role to the crypto key. -# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/). - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/iam/kpt/kms-crypto-key kms-crypto-key - ``` -# REQUIREMENTS - - A working Config Connector instance using the "cnrm-system" service - account with either `roles/cloudkms.admin` or `roles/owner` in the project - managed by Config Connector. - - Cloud Key Management Service (KMS) API enabled in the project where Config - Connector is installed - - Cloud Key Management Service (KMS) API enabled in the project managed by - Config Connector if it is a different project - - -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|------------|-----------------------|-----------------|--------------------------|-------| -| iam-member | ${IAM_MEMBER?} | PLACEHOLDER | IAM member to grant role | 1 | -| key-name | allowed-key | package-default | name of key | 2 | -| location | us-central1 | package-default | location of ring | 1 | -| ring-name | allowed-ring | package-default | name of ring | 2 | -| role | roles/cloudkms.signer | package-default | IAM role to grant | 1 | -# USAGE - Set the IAM member to apply a role to: - ``` - kpt cfg set . 
iam-member user:name@example.com - ``` - _Optionally_ set the role to apply: - ``` - kpt cfg set . role roles/cloudkms.cryptoKeyDecrypter - ``` - _Optionally_ set the crypto key name, key ring name, and key ring location: - ``` - kpt cfg set . key-name your-key - kpt cfg set . ring-name your-ring - kpt cfg set . location us-west1 - ``` - Once the values are satisfactory, apply: - ``` - kubectl apply -f . - ``` -# LICENSE - Apache 2.0 - See [LICENSE](/LICENSE) for more information. - diff --git a/config-connector/solutions/iam/kpt/kms-crypto-key/iampolicymember.yaml b/config-connector/solutions/iam/kpt/kms-crypto-key/iampolicymember.yaml deleted file mode 100644 index 5bd4eb9da09..00000000000 --- a/config-connector/solutions/iam/kpt/kms-crypto-key/iampolicymember.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: iampolicymember-sample-condition -spec: - member: ${IAM_MEMBER?} # {"$ref":"#/definitions/io.k8s.cli.setters.iam-member"} - role: roles/cloudkms.signer # {"$ref":"#/definitions/io.k8s.cli.setters.role"} - resourceRef: - apiVersion: kms.cnrm.cloud.google.com/v1beta1 - kind: KMSCryptoKey - name: allowed-key # {"$ref":"#/definitions/io.k8s.cli.setters.key-name"} diff --git a/config-connector/solutions/iam/kpt/kms-crypto-key/kmscryptokey.yaml b/config-connector/solutions/iam/kpt/kms-crypto-key/kmscryptokey.yaml deleted file mode 100644 index e89f252aad5..00000000000 --- a/config-connector/solutions/iam/kpt/kms-crypto-key/kmscryptokey.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: kms.cnrm.cloud.google.com/v1beta1 -kind: KMSCryptoKey -metadata: - name: allowed-key # {"$ref":"#/definitions/io.k8s.cli.setters.key-name"} -spec: - keyRingRef: - name: allowed-ring # {"$ref":"#/definitions/io.k8s.cli.setters.ring-name"} - purpose: ASYMMETRIC_SIGN - versionTemplate: - algorithm: EC_SIGN_P384_SHA384 - protectionLevel: SOFTWARE diff --git a/config-connector/solutions/iam/kpt/kms-crypto-key/kmskeyring.yaml b/config-connector/solutions/iam/kpt/kms-crypto-key/kmskeyring.yaml deleted file mode 100644 index c459e16568e..00000000000 --- a/config-connector/solutions/iam/kpt/kms-crypto-key/kmskeyring.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: kms.cnrm.cloud.google.com/v1beta1 -kind: KMSKeyRing -metadata: - name: allowed-ring # {"$ref":"#/definitions/io.k8s.cli.setters.ring-name"} -spec: - location: us-central1 # {"$ref":"#/definitions/io.k8s.cli.setters.location"} diff --git a/config-connector/solutions/iam/kpt/kms-key-ring/Kptfile b/config-connector/solutions/iam/kpt/kms-key-ring/Kptfile deleted file mode 100644 index 8ce7e0fce1d..00000000000 --- a/config-connector/solutions/iam/kpt/kms-key-ring/Kptfile +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: kms-key-ring -packageMetadata: - shortDescription: create a kms key ring and apply an IAM role to it -openAPI: - definitions: - io.k8s.cli.setters.ring-name: - description: name of key ring - x-k8s-cli: - setter: - name: ring-name - value: allowed-ring - setBy: package-default - io.k8s.cli.setters.location: - description: location of key ring - x-k8s-cli: - setter: - name: location - value: us-central1 - setBy: package-default - io.k8s.cli.setters.role: - description: IAM role to grant - x-k8s-cli: - setter: - name: role - value: roles/cloudkms.admin - setBy: package-default - io.k8s.cli.setters.iam-member: - description: member to grant role - x-k8s-cli: - setter: - name: iam-member - value: ${IAM_MEMBER?} - setBy: PLACEHOLDER diff --git a/config-connector/solutions/iam/kpt/kms-key-ring/README.md b/config-connector/solutions/iam/kpt/kms-key-ring/README.md deleted file mode 100644 index 35bef88b7f2..00000000000 --- a/config-connector/solutions/iam/kpt/kms-key-ring/README.md +++ /dev/null @@ -1,47 +0,0 @@ -KMS Key Ring -================================================== - -# NAME - kms-key-ring -# SYNOPSIS - Config Connector compatible yaml files for creating a kms key ring and applying a role to it. -# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/). 
- ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/iam/kpt/kms-key-ring kms-key-ring - ``` -# REQUIREMENTS - - A working Config Connector instance using the "cnrm-system" service - account with either `roles/cloudkms.admin` or `roles/owner` in the project - managed by Config Connector. - - Cloud Key Management Service (KMS) API enabled in the project where Config - Connector is installed - - Cloud Key Management Service (KMS) API enabled in the project managed by - Config Connector if it is a different project - -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|------------|----------------------|-----------------|----------------------|-------| -| iam-member | ${IAM_MEMBER?} | PLACEHOLDER | member to grant role | 1 | -| location | us-central1 | package-default | location of key ring | 1 | -| ring-name | allowed-ring | package-default | name of key ring | 2 | -| role | roles/cloudkms.admin | package-default | IAM role to grant | 1 | -# USAGE - Set the IAM member that you would like to apply a role to. - ``` - kpt cfg set . iam-member user:name@example.com - ``` - _Optionally_ set the name of the KMS keyring (defaults to `allowed-ring`). - ``` - kpt cfg set . ring-name your-ring-name - ``` - _Optionally_ set the [IAM role](https://cloud.google.com/iam/docs/understanding-roles#cloud-kms-roles) to grant (defaults to `roles/cloudkms.admin`). - ``` - kpt cfg set . role roles/cloudkms.importer - ``` - _Optionally_ set the location of the ring (defaults to `us-central1`) - ``` - kpt cfg set . location us-west1 - ``` -# LICENSE - Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/iam/kpt/kms-key-ring/iampolicymember.yaml b/config-connector/solutions/iam/kpt/kms-key-ring/iampolicymember.yaml deleted file mode 100644 index 904479ad88f..00000000000 --- a/config-connector/solutions/iam/kpt/kms-key-ring/iampolicymember.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: ring-iam-member -spec: - member: ${IAM_MEMBER?} # {"$ref":"#/definitions/io.k8s.cli.setters.iam-member"} - role: roles/cloudkms.admin # {"$ref":"#/definitions/io.k8s.cli.setters.role"} - resourceRef: - apiVersion: kms.cnrm.cloud.google.com/v1beta1 - kind: KMSKeyRing - name: allowed-ring # {"$ref":"#/definitions/io.k8s.cli.setters.ring-name"} diff --git a/config-connector/solutions/iam/kpt/kms-key-ring/kmskeyring.yaml b/config-connector/solutions/iam/kpt/kms-key-ring/kmskeyring.yaml deleted file mode 100644 index c459e16568e..00000000000 --- a/config-connector/solutions/iam/kpt/kms-key-ring/kmskeyring.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: kms.cnrm.cloud.google.com/v1beta1 -kind: KMSKeyRing -metadata: - name: allowed-ring # {"$ref":"#/definitions/io.k8s.cli.setters.ring-name"} -spec: - location: us-central1 # {"$ref":"#/definitions/io.k8s.cli.setters.location"} diff --git a/config-connector/solutions/iam/kpt/member-iam/0-namespace.yaml b/config-connector/solutions/iam/kpt/member-iam/0-namespace.yaml deleted file mode 100644 index 0b1cbeef377..00000000000 --- a/config-connector/solutions/iam/kpt/member-iam/0-namespace.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Namespace -metadata: - annotations: - # Replace the ${PROJECT_ID?} below with your desired project ID. 
- cnrm.cloud.google.com/project-id: ${PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.project-id"} - name: member-iam-solution diff --git a/config-connector/solutions/iam/kpt/member-iam/Kptfile b/config-connector/solutions/iam/kpt/member-iam/Kptfile deleted file mode 100644 index 161f333a32a..00000000000 --- a/config-connector/solutions/iam/kpt/member-iam/Kptfile +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: . -packageMetadata: - shortDescription: Create a service account and grant it permissions to access the - desired project. -openAPI: - definitions: - io.k8s.cli.setters.project-id: - x-k8s-cli: - setter: - name: project-id - value: ${PROJECT_ID?} - setBy: PLACEHOLDER - description: The target project where you want to create service accounts and - grant permissions. - io.k8s.cli.substitutions.project-id-in-sa-sub: - x-k8s-cli: - substitution: - name: project-id-in-sa-sub - pattern: serviceAccount:SERVICE_ACCOUNT_NAME_SETTER@PROJECT_ID_SETTER.iam.gserviceaccount.com - values: - - marker: PROJECT_ID_SETTER - ref: '#/definitions/io.k8s.cli.setters.project-id' - - marker: SERVICE_ACCOUNT_NAME_SETTER - ref: '#/definitions/io.k8s.cli.setters.service-account-name' - io.k8s.cli.substitutions.project-id-sub: - x-k8s-cli: - substitution: - name: project-id-sub - pattern: projects/PROJECT_ID_SETTER - values: - - marker: PROJECT_ID_SETTER - ref: '#/definitions/io.k8s.cli.setters.project-id' - io.k8s.cli.setters.service-account-name: - x-k8s-cli: - setter: - name: service-account-name - value: member-iam-test - setBy: package-default - description: The name of the new IAM service account. - io.k8s.cli.setters.role: - x-k8s-cli: - setter: - name: role - value: roles/compute.networkAdmin - setBy: package-default - description: The role you want to grant the service account with. 
diff --git a/config-connector/solutions/iam/kpt/member-iam/README.md b/config-connector/solutions/iam/kpt/member-iam/README.md deleted file mode 100644 index 1d9d1b5969b..00000000000 --- a/config-connector/solutions/iam/kpt/member-iam/README.md +++ /dev/null @@ -1,65 +0,0 @@ -Member IAM -================================================== - -# NAME - - member-iam - -# SYNOPSIS - - Config Connector compatible YAML files to create a service account in your desired project, and grant it a specific role (defaults to `compute.networkAdmin`) in the project. - -# CONSUMPTION - - Using [kpt](https://googlecontainertools.github.io/kpt/): - - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/iam/kpt/member-iam member-iam - ``` - -# REQUIREMENTS - - * A working Config Connector cluster using "cnrm-system" service account - that has the following roles in your desired project (it doesn't need to - be the project managed by Config Connector): - - - `roles/resourcemanager.projectIamAdmin` - - `roles/iam.serviceAccountAdmin` - - * Cloud Resource Manager API enabled in the project where Config Connector - is installed - -# USAGE - - Replace `${PROJECT_ID?}` with your desired project ID value from - within this directory: - - ``` - kpt cfg set . project-id VALUE - ``` - - _Optionally_, you can also change the service account name and role - (you can find all the predefined GCP IAM roles - [here](https://cloud.google.com/iam/docs/understanding-roles#predefined_roles)): - - ``` - kpt cfg set . service-account-name VALUE - kpt cfg set . role VALUE - ``` - - Once the fields are set in the configs, apply the YAMLs: - - ``` - kubectl apply -f . - ``` - - You can check the resources you just created: - - ``` - kubectl get iamserviceaccount --namespace member-iam-solution - kubectl get iampolicymember --namespace member-iam-solution - ``` - -# LICENSE - - Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
\ No newline at end of file diff --git a/config-connector/solutions/iam/kpt/member-iam/iampolicymember.yaml b/config-connector/solutions/iam/kpt/member-iam/iampolicymember.yaml deleted file mode 100644 index 8f320608058..00000000000 --- a/config-connector/solutions/iam/kpt/member-iam/iampolicymember.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: project-iam-member - namespace: member-iam-solution -spec: - # Replace the ${PROJECT_ID?} below with your desired project ID. - member: serviceAccount:member-iam-test@${PROJECT_ID?}.iam.gserviceaccount.com # {"$ref":"#/definitions/io.k8s.cli.substitutions.project-id-in-sa-sub"} - role: roles/compute.networkAdmin # {"$ref":"#/definitions/io.k8s.cli.setters.role"} - resourceRef: - # Replace the ${PROJECT_ID?} below with your desired project ID. 
- apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 - kind: Project - external: projects/${PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.substitutions.project-id-sub"} diff --git a/config-connector/solutions/iam/kpt/member-iam/iamserviceaccount.yaml b/config-connector/solutions/iam/kpt/member-iam/iamserviceaccount.yaml deleted file mode 100644 index 370aff2f4b5..00000000000 --- a/config-connector/solutions/iam/kpt/member-iam/iamserviceaccount.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMServiceAccount -metadata: - name: member-iam-test # {"$ref":"#/definitions/io.k8s.cli.setters.service-account-name"} - namespace: member-iam-solution diff --git a/config-connector/solutions/iam/kpt/project-iam/Kptfile b/config-connector/solutions/iam/kpt/project-iam/Kptfile deleted file mode 100644 index 78614fcdaa2..00000000000 --- a/config-connector/solutions/iam/kpt/project-iam/Kptfile +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: project-iam -packageMetadata: - shortDescription: configure permissions for a project -openAPI: - definitions: - io.k8s.cli.setters.member: - description: IAM member to grant role - x-k8s-cli: - setter: - name: member - value: ${IAM_MEMBER?} - setBy: PLACEHOLDER - io.k8s.cli.setters.role: - description: IAM role to grant - x-k8s-cli: - setter: - name: role - value: roles/logging.viewer - setBy: package-default - io.k8s.cli.setters.project-id: - description: ID of project - x-k8s-cli: - setter: - name: project-id - value: ${PROJECT_ID?} - setBy: PLACEHOLDER - io.k8s.cli.substitutions.project-id-sub: - x-k8s-cli: - substitution: - name: project-id-sub - pattern: projects/PROJECT_ID_SETTER - values: - - marker: PROJECT_ID_SETTER - ref: '#/definitions/io.k8s.cli.setters.project-id' diff --git a/config-connector/solutions/iam/kpt/project-iam/README.md b/config-connector/solutions/iam/kpt/project-iam/README.md deleted file mode 100644 index 60c12e011e8..00000000000 --- a/config-connector/solutions/iam/kpt/project-iam/README.md +++ /dev/null @@ -1,38 +0,0 @@ -Project IAM -================================================== - -# NAME - project-iam -# SYNOPSIS - Config Connector compatible YAML files to grant a role for a member in a project. 
-# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/): - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/iam/kpt/project-iam project-iam - ``` -# REQUIREMENTS - * A working Config Connector cluster using "cnrm-system" service account - that has the `roles/resourcemanager.projectIamAdmin` role in your desired - project (it doesn't need to be the project managed by Config Connector). - * The project managed by Config Connector has Cloud Resource Manager API - enabled. -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|------------|----------------------|-----------------|--------------------------|-------| -| member | ${IAM_MEMBER?} | PLACEHOLDER | IAM member to grant role | 1 | -| project-id | ${PROJECT_ID?} | PLACEHOLDER | ID of project | 1 | -| role | roles/logging.viewer | package-default | IAM role to grant | 1 | -# USAGE -Setters marked as `PLACEHOLDER` are required. Set them using kpt: -``` -kpt cfg set . member user:name@example.com -kpt cfg set . project-id your-project -``` -_Optionally_ set the role to grant in the same manner. - -Once the configuration is satisfactory, apply the YAML: -``` -kubectl apply -f . -``` -# LICENSE - Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/kpt/project-iam/iampolicymember.yaml b/config-connector/solutions/iam/kpt/project-iam/iampolicymember.yaml deleted file mode 100644 index 06ea7fbf1bf..00000000000 --- a/config-connector/solutions/iam/kpt/project-iam/iampolicymember.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: project-iam-member -spec: - member: ${IAM_MEMBER?} # {"$ref":"#/definitions/io.k8s.cli.setters.member"} - role: roles/logging.viewer # {"$ref":"#/definitions/io.k8s.cli.setters.role"} - resourceRef: - apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 - kind: Project - external: projects/${PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.substitutions.project-id-sub"} diff --git a/config-connector/solutions/iam/kpt/pubsub-subscription/Kptfile b/config-connector/solutions/iam/kpt/pubsub-subscription/Kptfile deleted file mode 100644 index 6c80f410a01..00000000000 --- a/config-connector/solutions/iam/kpt/pubsub-subscription/Kptfile +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: pubsub-subscription -packageMetadata: - shortDescription: creates a pubsub subscription and configures permissions for it -openAPI: - definitions: - io.k8s.cli.setters.topic-name: - x-k8s-cli: - setter: - name: topic-name - value: allowed-topic - setBy: package-default - description: name of PubSub topic - io.k8s.cli.setters.iam-member: - description: IAM member to grant role - x-k8s-cli: - setter: - name: iam-member - value: ${IAM_MEMBER?} - setBy: PLACEHOLDER - io.k8s.cli.setters.subscription-name: - description: name of PubSub subscription - x-k8s-cli: - setter: - name: subscription-name - value: allowed-subscription - setBy: package-default - io.k8s.cli.setters.role: - description: IAM role to grant - x-k8s-cli: - setter: - name: role - 
value: roles/pubsub.viewer - setBy: package-default diff --git a/config-connector/solutions/iam/kpt/pubsub-subscription/README.md b/config-connector/solutions/iam/kpt/pubsub-subscription/README.md deleted file mode 100644 index dd43581e2fd..00000000000 --- a/config-connector/solutions/iam/kpt/pubsub-subscription/README.md +++ /dev/null @@ -1,44 +0,0 @@ -Pub/Sub Subscription -================================================== -# NAME - pubsub-subscription -# SYNOPSIS - This package creates a pubsub subscription and configures permissions for it by creating an IAMPolicyMember resource. -# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/): - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/iam/kpt/pubsub-subscription pubsub-subscription - ``` -# REQUIREMENTS - - A working Config Connector instance using the "cnrm-system" service - account with either `roles/pubsub.admin` or `roles/owner` in the project - managed by Config Connector - - Cloud Pub/Sub API enabled in the project where Config Connector is - installed - - Cloud Pub/Sub API enabled in the project managed by Config Connector if it - is a different project -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|-------------------|----------------------|-----------------|-----------------------------|-------| -| iam-member | ${IAM_MEMBER?} | PLACEHOLDER | IAM member to grant role | 1 | -| role | roles/pubsub.viewer | package-default | IAM role to grant | 1 | -| subscription-name | allowed-subscription | package-default | name of PubSub subscription | 2 | -| topic-name | allowed-topic | package-default | name of PubSub topic | 2 | -# USAGE - Set the `iam-member` to grant a role to. - ``` - kpt cfg set . iam-member user:name@example.com - ``` - _Optionally_ set the `role` to grant. The default role is `roles/pubsub.viewer`. - ``` - kpt cfg set . 
role roles/pubsub.editor - ``` - _Optionally_ set `topic-name` and `subscription-name` in the same manner. Defaults are `allowed-topic` and `allowed-subscription`. - - Once the configuration is satisfactory, apply: - ``` - kubectl apply -f . - ``` -# LICENSE -Apache 2.0 - See [LICENSE](/LICENSE) for more information. - diff --git a/config-connector/solutions/iam/kpt/pubsub-subscription/iampolicymember.yaml b/config-connector/solutions/iam/kpt/pubsub-subscription/iampolicymember.yaml deleted file mode 100644 index 9a981694464..00000000000 --- a/config-connector/solutions/iam/kpt/pubsub-subscription/iampolicymember.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: subscription-iam-member -spec: - member: ${IAM_MEMBER?} # {"$ref":"#/definitions/io.k8s.cli.setters.iam-member"} - role: roles/pubsub.viewer # {"$ref":"#/definitions/io.k8s.cli.setters.role"} - resourceRef: - apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 - kind: PubSubSubscription - name: allowed-subscription # {"$ref":"#/definitions/io.k8s.cli.setters.subscription-name"} diff --git a/config-connector/solutions/iam/kpt/pubsub-subscription/pubsubsubscription.yaml b/config-connector/solutions/iam/kpt/pubsub-subscription/pubsubsubscription.yaml deleted file mode 100644 index 896ff306a69..00000000000 --- a/config-connector/solutions/iam/kpt/pubsub-subscription/pubsubsubscription.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 -kind: PubSubSubscription -metadata: - name: allowed-subscription # {"$ref":"#/definitions/io.k8s.cli.setters.subscription-name"} -spec: - ackDeadlineSeconds: 15 - messageRetentionDuration: 86400s - retainAckedMessages: false - topicRef: - name: allowed-topic # {"$ref":"#/definitions/io.k8s.cli.setters.topic-name"} diff --git a/config-connector/solutions/iam/kpt/pubsub-subscription/pubsubtopic.yaml b/config-connector/solutions/iam/kpt/pubsub-subscription/pubsubtopic.yaml deleted file mode 100644 index ea6e76790cb..00000000000 --- a/config-connector/solutions/iam/kpt/pubsub-subscription/pubsubtopic.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 -kind: PubSubTopic -metadata: - name: allowed-topic # {"$ref":"#/definitions/io.k8s.cli.setters.topic-name"} diff --git a/config-connector/solutions/iam/kpt/pubsub-topic/Kptfile b/config-connector/solutions/iam/kpt/pubsub-topic/Kptfile deleted file mode 100644 index a18b32faf6f..00000000000 --- a/config-connector/solutions/iam/kpt/pubsub-topic/Kptfile +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: pubsub-topic -packageMetadata: - shortDescription: Create a PubSub topic and enable IAM roles for it -openAPI: - definitions: - io.k8s.cli.setters.iam-member: - description: identity to grant privileges - x-k8s-cli: - setter: - name: iam-member - value: ${IAM_MEMBER?} - setBy: PLACEHOLDER - io.k8s.cli.setters.topic-name: - description: name of PubSub topic - x-k8s-cli: - setter: - name: topic-name - value: allowed-topic - setBy: package-default - io.k8s.cli.setters.role: - description: IAM role to grant - x-k8s-cli: - setter: - name: role - value: roles/pubsub.editor - setBy: package-default diff --git a/config-connector/solutions/iam/kpt/pubsub-topic/README.md b/config-connector/solutions/iam/kpt/pubsub-topic/README.md deleted file mode 100644 index 5fec87600df..00000000000 --- a/config-connector/solutions/iam/kpt/pubsub-topic/README.md +++ /dev/null @@ -1,45 +0,0 @@ -Pub/Sub Topic -================================================== -# NAME - pubsub-topic -# SYNOPSIS - Config Connector compatible YAML files to grant a role to a particular IAM member for a PubSub topic. -# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/). 
- ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/config-connector/solutions/iam/kpt/pubsub-topic pubsub-topic - ``` -# REQUIREMENTS - - A working Config Connector instance using the "cnrm-system" service - account with either `roles/pubsub.admin` or `roles/owner` in the project - managed by Config Connector - - Cloud Pub/Sub API enabled in the project where Config Connector is - installed - - Cloud Pub/Sub API enabled in the project managed by Config Connector if it - is a different project -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|------------|---------------------|-----------------|------------------------------|-------| -| iam-member | ${IAM_MEMBER?} | PLACEHOLDER | identity to grant privileges | 1 | -| role | roles/pubsub.editor | package-default | IAM role to grant | 1 | -| topic-name | allowed-topic | package-default | name of PubSub topic | 2 | -# USAGE - - Replace `${IAM_MEMBER?}` with the GCP identity to grant access to. - ``` - kpt cfg set . iam-member user:name@example.com - ``` - Optionally set the name of the PubSub topic (defaults to `allowed-topic`) and -the role to grant (defaults to `roles/pubsub.editor`, full list of roles -[here](https://cloud.google.com/iam/docs/understanding-roles#pub-sub-roles)) - ``` - kpt cfg set . topic-name your-topic - kpt cfg set . role roles/pubsub.viewer - ``` - Once the values are satisfactory, apply the YAMLs. - ``` - kubectl apply -f . - ``` - Note: This will create the topic if it does not exist. -# LICENSE - Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/iam/kpt/pubsub-topic/iampolicymember.yaml b/config-connector/solutions/iam/kpt/pubsub-topic/iampolicymember.yaml deleted file mode 100644 index 46cd418503e..00000000000 --- a/config-connector/solutions/iam/kpt/pubsub-topic/iampolicymember.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: topic-iam-member -spec: - member: ${IAM_MEMBER?} # {"$ref":"#/definitions/io.k8s.cli.setters.iam-member"} - role: roles/pubsub.editor # {"$ref":"#/definitions/io.k8s.cli.setters.role"} - resourceRef: - apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 - kind: PubSubTopic - name: allowed-topic # {"$ref":"#/definitions/io.k8s.cli.setters.topic-name"} diff --git a/config-connector/solutions/iam/kpt/pubsub-topic/pubsubtopic.yaml b/config-connector/solutions/iam/kpt/pubsub-topic/pubsubtopic.yaml deleted file mode 100644 index ea6e76790cb..00000000000 --- a/config-connector/solutions/iam/kpt/pubsub-topic/pubsubtopic.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 -kind: PubSubTopic -metadata: - name: allowed-topic # {"$ref":"#/definitions/io.k8s.cli.setters.topic-name"} diff --git a/config-connector/solutions/iam/kpt/service-account/Kptfile b/config-connector/solutions/iam/kpt/service-account/Kptfile deleted file mode 100644 index 899409e4cd9..00000000000 --- a/config-connector/solutions/iam/kpt/service-account/Kptfile +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: . -packageMetadata: - shortDescription: Grant an IAM role to a member for accessing a given service account -openAPI: - definitions: - io.k8s.cli.setters.service-account-name: - x-k8s-cli: - setter: - name: service-account-name - value: service-account-solution - setBy: package-default - description: The name of the new IAM service account. 
- io.k8s.cli.setters.iam-member: - x-k8s-cli: - setter: - name: iam-member - value: ${IAM_MEMBER?} - setBy: PLACEHOLDER - description: GCP identity to grant the role - io.k8s.cli.setters.role: - x-k8s-cli: - setter: - name: role - value: roles/iam.serviceAccountKeyAdmin - setBy: package-default - description: the IAM role to grant to the member diff --git a/config-connector/solutions/iam/kpt/service-account/README.md b/config-connector/solutions/iam/kpt/service-account/README.md deleted file mode 100644 index 13a014ddc3e..00000000000 --- a/config-connector/solutions/iam/kpt/service-account/README.md +++ /dev/null @@ -1,53 +0,0 @@ -Service Account -================================================== - -# NAME - - service-account - -# SYNOPSIS - - Config Connector compatible YAML files to create a service account in your desired project, and grant a specific member a role (default to `roles/iam.serviceAccountKeyAdmin`) for accessing the service account that just created. - -# CONSUMPTION - - Fetch the [kpt](https://googlecontainertools.github.io/kpt/) package of the solution: - - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/iam/kpt/service-account service-account - ``` - -# REQUIREMENTS - - A working Config Connector instance using the "cnrm-system" service account - with either `roles/iam.serviceAccountAdmin` or `roles/owner` in the project - managed by Config Connector. - -# USAGE - Replace `${IAM_MEMBER?}` with the GCP identity to grant access to: - ``` - kpt cfg set . iam-member user:name@example.com - ``` - - _Optionally_, you can change the following fields before you apply the YAMLs: - - the name of the service account: - ``` - kpt cfg set . service-account-name VALUE - ``` - - the role granted to the GCP identity. - (you can find all of the service account related IAM roles - [here](https://cloud.google.com/iam/docs/understanding-roles#service-accounts-roles)): - - ``` - kpt cfg set . 
role roles/iam.serviceAccountTokenCreator - ``` - - Apply the YAMLs: - - ``` - kubectl apply -f . - ``` - -# LICENSE - - Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/kpt/service-account/iampolicymember.yaml b/config-connector/solutions/iam/kpt/service-account/iampolicymember.yaml deleted file mode 100644 index d4b88961926..00000000000 --- a/config-connector/solutions/iam/kpt/service-account/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: iampolicymember-service-account -spec: - member: ${IAM_MEMBER?} # {"$ref":"#/definitions/io.k8s.cli.setters.iam-member"} - role: roles/iam.serviceAccountKeyAdmin # {"$ref":"#/definitions/io.k8s.cli.setters.role"} - resourceRef: - apiVersion: iam.cnrm.cloud.google.com/v1beta1 - kind: IAMServiceAccount - name: service-account-solution # {"$ref":"#/definitions/io.k8s.cli.setters.service-account-name"} diff --git a/config-connector/solutions/iam/kpt/service-account/iamserviceaccount.yaml b/config-connector/solutions/iam/kpt/service-account/iamserviceaccount.yaml deleted file mode 100644 index 0bfe28b7bd4..00000000000 --- a/config-connector/solutions/iam/kpt/service-account/iamserviceaccount.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMServiceAccount -metadata: - name: service-account-solution # {"$ref":"#/definitions/io.k8s.cli.setters.service-account-name"} diff --git a/config-connector/solutions/iam/kpt/storage-bucket-iam/Kptfile b/config-connector/solutions/iam/kpt/storage-bucket-iam/Kptfile deleted file mode 100644 index bc056b06377..00000000000 --- a/config-connector/solutions/iam/kpt/storage-bucket-iam/Kptfile +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: storage-bucket-iam -packageMetadata: - shortDescription: Configure permissions for a storage bucket -openAPI: - definitions: - io.k8s.cli.setters.iam-member: - description: member to grant role - x-k8s-cli: - setter: - name: iam-member - value: ${IAM_MEMBER?} - setBy: PLACEHOLDER - io.k8s.cli.setters.bucket-name: - description: name of storage bucket - x-k8s-cli: - setter: - name: bucket-name - value: ${BUCKET_NAME?} - setBy: PLACEHOLDER - io.k8s.cli.setters.role: - description: IAM role to grant - x-k8s-cli: - setter: - name: role - value: roles/storage.objectViewer - setBy: package-default diff --git a/config-connector/solutions/iam/kpt/storage-bucket-iam/README.md b/config-connector/solutions/iam/kpt/storage-bucket-iam/README.md deleted file mode 100644 index 747349e1b97..00000000000 --- a/config-connector/solutions/iam/kpt/storage-bucket-iam/README.md +++ /dev/null @@ -1,45 +0,0 @@ -Storage Bucket IAM -================================================== -# NAME - storage-bucket-iam -# SYNOPSIS - Config Connector compatible yaml to enable permissions for a storage bucket. -# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/). - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/iam/kpt/storage-bucket-iam storage-bucket-iam - ``` -# REQUIREMENTS -- A working Config Connector instance. 
-- A storage bucket managed by [IAM](https://cloud.google.com/storage/docs/access-control#using_permissions_with_acls). -- The "cnrm-system" service account with `roles/storage.admin` in either - the storage bucket or the project which owns the storage bucket. - - Note: Using [uniform bucket-level access control](https://cloud.google.com/storage/docs/uniform-bucket-level-access) is recommended for this package. -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|-------------|----------------------------|-----------------|------------------------|-------| -| bucket-name | ${BUCKET_NAME?} | PLACEHOLDER | name of storage bucket | 1 | -| iam-member | ${IAM_MEMBER?} | PLACEHOLDER | member to grant role | 1 | -| role | roles/storage.objectViewer | package-default | IAM role to grant | 1 | -# USAGE - Set the name of the bucket you want to configure permissions for. - ``` - kpt cfg set . bucket-name your-bucket - ``` - Set the IAM member to grant a role to. - ``` - kpt cfg set . iam-member user:name@example.com - ``` - Optionally, set the [storage - role](https://cloud.google.com/iam/docs/understanding-roles#storage-roles) (defaults to - `roles/storage.objectViewer`) that you want to apply and the IAM member the role will apply to. - ``` - kpt cfg set . role roles/storage.admin - ``` - Once the configuration is satisfactory, apply: - ``` - kubectl apply -f . - ``` -# LICENSE - Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/kpt/storage-bucket-iam/iampolicymember.yaml b/config-connector/solutions/iam/kpt/storage-bucket-iam/iampolicymember.yaml deleted file mode 100644 index 54e0a93515f..00000000000 --- a/config-connector/solutions/iam/kpt/storage-bucket-iam/iampolicymember.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: storage-bucket-iam-member -spec: - member: ${IAM_MEMBER?} # {"$ref":"#/definitions/io.k8s.cli.setters.iam-member"} - role: roles/storage.objectViewer # {"$ref":"#/definitions/io.k8s.cli.setters.role"} - resourceRef: - apiVersion: storage.cnrm.cloud.google.com/v1beta1 - kind: StorageBucket - external: ${BUCKET_NAME?} # {"$ref":"#/definitions/io.k8s.cli.setters.bucket-name"} diff --git a/config-connector/solutions/iam/kpt/subnet/Kptfile b/config-connector/solutions/iam/kpt/subnet/Kptfile deleted file mode 100644 index 57c0fb8d594..00000000000 --- a/config-connector/solutions/iam/kpt/subnet/Kptfile +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: subnet -packageMetadata: - shortDescription: Create a subnet and grant a specific member a role to a member - for the subnet -openAPI: - definitions: - io.k8s.cli.setters.subnet-name: - x-k8s-cli: - setter: - name: subnet-name - value: subnet-solution - setBy: package-default - description: The name of the new subnet. - io.k8s.cli.setters.subnet-region: - x-k8s-cli: - setter: - name: subnet-region - value: us-central1 - setBy: package-default - description: The region of the new subnet. - io.k8s.cli.setters.compute-network-name: - x-k8s-cli: - setter: - name: compute-network-name - value: compute-network-example - setBy: package-default - description: The name of a existing GCP compute network for which you want to - create the subnet. 
- io.k8s.cli.setters.iam-member: - x-k8s-cli: - setter: - name: iam-member - value: ${IAM_MEMBER?} - setBy: PLACEHOLDER - description: GCP identity to grant the role - io.k8s.cli.setters.role: - x-k8s-cli: - setter: - name: role - value: roles/compute.networkUser - setBy: package-default - description: the IAM role to grant to the member diff --git a/config-connector/solutions/iam/kpt/subnet/README.md b/config-connector/solutions/iam/kpt/subnet/README.md deleted file mode 100644 index c0a79a87da6..00000000000 --- a/config-connector/solutions/iam/kpt/subnet/README.md +++ /dev/null @@ -1,69 +0,0 @@ -Subnet -================================================== - -# NAME - - subnet - -# SYNOPSIS - - Config Connector compatible YAML files to create a subnet in your desired project, and grant a specific member a role (default to `roles/compute.networkUser`) for accessing the subnet that just created. - -# CONSUMPTION - - Fetch the [kpt](https://googlecontainertools.github.io/kpt/) package of the solution: - - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/iam/kpt/subnet subnet - ``` - -# REQUIREMENTS - - - A working Config Connector instance using the "cnrm-system" service - account with either both `roles/compute.networkAdmin` and - `roles/iam.securityAdmin` roles or `roles/owner` in the project - managed by Config Connector. - - Compute Engine API enabled in the project where Config Connector is - installed - - Compute Engine API enabled in the project managed by Config Connector if - it is a different project - -# USAGE - Replace `${IAM_MEMBER?}` with the GCP identity to grant access to: - ``` - kpt cfg set . iam-member user:name@example.com - ``` - - _Optionally_, you can change the following fields before you apply the YAMLs: - - the name of the compute network - ``` - kpt cfg set . compute-network-name VALUE - ``` - - - the name of the subnet - ``` - kpt cfg set . 
subnet-name new-subnet-name - ``` - - - the region of the subnet - ``` - kpt cfg set . subnet-region us-west1 - ``` - - - the role granted to the GCP identity. - (you can find all of the subnet related IAM roles - [here](https://cloud.google.com/iam/docs/understanding-roles#compute-engine-roles)): - - ``` - kpt cfg set . role roles/compute.networkViewer - ``` - - Apply the YAMLs: - - ``` - kubectl apply -f . - ``` - -# LICENSE - - Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/iam/kpt/subnet/computenetwork.yaml b/config-connector/solutions/iam/kpt/subnet/computenetwork.yaml deleted file mode 100644 index 98d37363b00..00000000000 --- a/config-connector/solutions/iam/kpt/subnet/computenetwork.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeNetwork -metadata: - name: compute-network-example # {"$ref":"#/definitions/io.k8s.cli.setters.compute-network-name"} -spec: - routingMode: REGIONAL - autoCreateSubnetworks: false diff --git a/config-connector/solutions/iam/kpt/subnet/iampolicymember.yaml b/config-connector/solutions/iam/kpt/subnet/iampolicymember.yaml deleted file mode 100644 index ba90a0a419e..00000000000 --- a/config-connector/solutions/iam/kpt/subnet/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: iampolicymember-subnet -spec: - member: ${IAM_MEMBER?} # {"$ref":"#/definitions/io.k8s.cli.setters.iam-member"} - role: roles/compute.networkUser # {"$ref":"#/definitions/io.k8s.cli.setters.role"} - resourceRef: - apiVersion: compute.cnrm.cloud.google.com/v1beta1 - kind: ComputeSubnetwork - name: subnet-solution # {"$ref":"#/definitions/io.k8s.cli.setters.subnet-name"} diff --git a/config-connector/solutions/iam/kpt/subnet/subnet.yaml b/config-connector/solutions/iam/kpt/subnet/subnet.yaml deleted file mode 100644 index 7e703d7c9e0..00000000000 --- a/config-connector/solutions/iam/kpt/subnet/subnet.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeSubnetwork -metadata: - name: subnet-solution # {"$ref":"#/definitions/io.k8s.cli.setters.subnet-name"} -spec: - ipCidrRange: "10.0.0.0/9" - region: us-central1 # {"$ref":"#/definitions/io.k8s.cli.setters.subnet-region"} - privateIpGoogleAccess: false - networkRef: - name: compute-network-example # {"$ref":"#/definitions/io.k8s.cli.setters.compute-network-name"} - logConfig: - aggregationInterval: INTERVAL_10_MIN - flowSampling: 0.5 - metadata: INCLUDE_ALL_METADATA diff --git a/config-connector/solutions/networking/helm/autoneg/README.md b/config-connector/solutions/networking/helm/autoneg/README.md deleted file mode 100644 index 71719b80943..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/README.md +++ /dev/null @@ -1,194 +0,0 @@ -# Autoneg - -================================================== - -## NAME - - autoneg - -## SYNOPSIS - -Autoneg solution uses network endpoint groups to provision load balancing across multiple clusters. -This solution uses `./cluster/templates/autoneg.yaml` from [gke-autoneg-controller](https://github.com/GoogleCloudPlatform/gke-autoneg-controller). -For demonstration purposes it uses a docker image with a simple Node.js service (bulankou/node-hello-world) with a single endpoint that prints out a message. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the autoneg folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/networking/helm/autoneg - ``` - -## REQUIREMENTS - -1. [Helm](../../../README.md#helm) -1. GKE Cluster with Config Connector. This solution assumes that all resources are installed in the same project, where the cluster with Config Connector is installed, and that load balancing resources are installed on the same cluster where Config Connector is installed. 
If you would like to configure your resources in a different project, the easiest approach would be to give your Config Connector service account (`cnrm-system`) owner permissions on this target project. -1. If your Config Connector version is earlier than [1.12.0](https://github.com/GoogleCloudPlatform/k8s-config-connector/releases) you need to apply [this workaround](https://github.com/GoogleCloudPlatform/k8s-config-connector/issues/78#issuecomment-577285402) to `iampolicymembers.iam.cnrm.cloud.google.com` CRD. -1. `compute.googleapis.com`, `container.googleapis.com` and `cloudresourcemanager.googleapis.com` APIs should be enabled on the project managed by Config Connector, in addition to the default services enabled. - -## USAGE - -All steps are run from this directory. - -1. Create the clusters. - - Review and update the values in `./clusters/values.yaml`. Note that if you change cluster name and location, you will need to change how they are used in `gcloud container clusters get-credentials` commands below. [PROJECT_ID] refers to the project where all the GCP resources will be created. - - ```bash - # validate your chart - helm lint ./clusters/ --set projectId=[PROJECT_ID] - - # check the output of your chart - helm template ./clusters/ --set projectId=[PROJECT_ID] - - # install your chart - helm install ./clusters/ --set projectId=[PROJECT_ID] --generate-name - ``` - -1. Create load balancing resources. - - Review and update the values in `./lb/values.yaml`. [PROJECT_ID] refers to the project where all the GCP resources will be created. - - ```bash - # validate your chart - helm lint ./lb/ --set projectId=[PROJECT_ID] - - # check the output of your chart - helm template ./lb/ --set projectId=[PROJECT_ID] - - # install your chart - helm install ./lb/ --set projectId=[PROJECT_ID] --generate-name - ``` - -1. 
Wait for clusters to be created - - ```bash - # The command uses cluster names based on the values passed in the ealier step - kubectl wait --for=condition=Ready containercluster/cluster-na containercluster/cluster-eu - ``` - -1. Configure first cluster - - ```bash - # switch the context to the first cluster. The command uses cluster name and zone based on the values used to create the clusters. - gcloud container clusters get-credentials cluster-na --zone=us-central1-b - - # validate your chart - helm lint ./workload/ --set projectId=[PROJECT_ID] --set localMessage="Hello from North America\!" - - # install your chart - helm install ./workload/ --set projectId=[PROJECT_ID] --set localMessage="Hello from North America\!" --generate-name - - # annotate service account - kubectl annotate sa -n autoneg-system default iam.gke.io/gcp-service-account=autoneg-system@[PROJECT_ID].iam.gserviceaccount.com - - # ensure pods are ready - kubectl wait --for=condition=Ready pods --all - - # check the service and ensure that `anthos.cft.dev/autoneg-status` annotation is present in the output - kubectl get svc node-app-backend -o=jsonpath='{.metadata.annotations}' - ``` - -1. Repeat the step for the other cluster - - ```bash - # switch the context to the second cluster. The command uses cluster name and zone based on the values used to create the clusters. - gcloud container clusters get-credentials cluster-eu --zone=europe-west2-a - - # validate your chart - helm lint ./workload/ --set projectId=[PROJECT_ID] --set localMessage="Hello from Europe\!" - - - # install your chart - helm install ./workload/ --set projectId=[PROJECT_ID] --set localMessage="Hello from Europe\!" 
--generate-name - - # annotate service account - kubectl annotate sa -n autoneg-system default iam.gke.io/gcp-service-account=autoneg-system@[PROJECT_ID].iam.gserviceaccount.com - - # ensure pods are ready - kubectl wait --for=condition=Ready pods --all - - # check the service and ensure that `anthos.cft.dev/autoneg-status` annotation is present in the output - kubectl get svc node-app-backend -o=jsonpath='{.metadata.annotations}' - ``` - -1. Switch the context to the cluster that contains the configs for load balancing resources and run verify that multi-cluster ingress is configured - - ```bash - # switch the context to the main cluster - gcloud container clusters get-credentials [CLUSTER NAME] --zone=[CLUSTER ZONE] - - # if you created the load balancing resources in the namespace, other than default, switch the context to that namespace - kubectl config set-context --current --namespace [NAMESPACE] - - # verify that your backend service has 2 backends attached (select index of "global" if prompted) - gcloud compute backend-services describe node-app-backend-service - ``` - - The backends section of the output should list both backends, for example: - - ```yaml - backends: - - balancingMode: RATE - capacityScaler: 1.0 - group: https://www.googleapis.com/compute/v1/projects//zones/us-central1-b/networkEndpointGroups/k8s1-37f1db7d-default-node-app-backend-80-486adca6 - maxRatePerEndpoint: 100.0 - - balancingMode: RATE - capacityScaler: 1.0 - group: https://www.googleapis.com/compute/v1/projects//zones/europe-west2-a/networkEndpointGroups/k8s1-292a63d7-default-node-app-backend-80-636c84c5 - maxRatePerEndpoint: 100.0 - connectionDraining: - drainingTimeoutSec: 300 - ``` - - Verify that load balancing resources are forwarding the request to the backend: - - ```bash - # curl the external address of the forwarding rule. Note that it might take around 5-10 minutes for load balancing to start working. - # You will see the message ("Hello from North America!" 
or "Hello from Europe!" based on your location). - curl $(kubectl get computeforwardingrule -o=jsonpath='{.items[0].spec.ipAddress.addressRef.external}') - -1. Clean up the installation: - - ```bash - # switch the context to the first cluster. The command uses cluster name and zone based on the values used to create the clusters. - gcloud container clusters get-credentials cluster-na --zone=us-central1-b - - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. Note that can take a few minutes before all K8s resources are fully deleted. - helm delete [release_name] - - # switch the context to the second cluster. The command uses cluster name and zone based on the values used to create the clusters. - gcloud container clusters get-credentials cluster-eu --zone=europe-west2-a - - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. Note that can take a few minutes before all K8s resources are fully deleted. - helm delete [release_name] - - # switch the context to the cluster that contains the configs for load balancing resources - gcloud container clusters get-credentials [CLUSTER NAME] --zone=[CLUSTER ZONE] - - # if you created the load balancing resources in the namespace, other than default, switch the context to that namespace - kubectl config set-context --current --namespace [NAMESPACE] - - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. Note that can take a few minutes before all K8s resources are fully deleted. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
\ No newline at end of file diff --git a/config-connector/solutions/networking/helm/autoneg/clusters/Chart.yaml b/config-connector/solutions/networking/helm/autoneg/clusters/Chart.yaml deleted file mode 100644 index 0951d3644b0..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/clusters/Chart.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -appVersion: "1.0" -description: A Helm chart to deploy the clusters for for GCP autoneg solution -name: gcp-autoneg-clusters -version: 0.1.0 -icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c4/Globe_icon.svg/1024px-Globe_icon.svg.png diff --git a/config-connector/solutions/networking/helm/autoneg/clusters/templates/gcp-cluster-1.yaml b/config-connector/solutions/networking/helm/autoneg/clusters/templates/gcp-cluster-1.yaml deleted file mode 100644 index 367718fcb7c..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/clusters/templates/gcp-cluster-1.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: container.cnrm.cloud.google.com/v1beta1 -kind: ContainerCluster -metadata: - name: {{ required "name is required!" .Values.cluster1.name }} -spec: - location: {{ required "location is required!" .Values.cluster1.zone }} - initialNodeCount: 2 - loggingService: logging.googleapis.com/kubernetes - monitoringService: monitoring.googleapis.com/kubernetes - ipAllocationPolicy: - servicesIpv4CidrBlock: "" - nodeConfig: - oauthScopes: - - "https://www.googleapis.com/auth/cloud-platform" - workloadIdentityConfig: - identityNamespace: '{{ required "identityNamespace is required!" .Values.projectId }}.svc.id.goog' - masterAuth: - username: '{{ required "username is required!" .Values.cluster1.masterAuthUser }}' - password: - valueFrom: - secretKeyRef: - name: cluster-1-secret-pwd - key: password - clientCertificateConfig: - issueClientCertificate: false diff --git a/config-connector/solutions/networking/helm/autoneg/clusters/templates/gcp-cluster-2.yaml b/config-connector/solutions/networking/helm/autoneg/clusters/templates/gcp-cluster-2.yaml deleted file mode 100644 index b689f4f985b..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/clusters/templates/gcp-cluster-2.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: container.cnrm.cloud.google.com/v1beta1 -kind: ContainerCluster -metadata: - name: {{ required "name is required!" .Values.cluster2.name }} -spec: - location: {{ required "location is required!" .Values.cluster2.zone }} - initialNodeCount: 2 - loggingService: logging.googleapis.com/kubernetes - monitoringService: monitoring.googleapis.com/kubernetes - ipAllocationPolicy: - servicesIpv4CidrBlock: "" - nodeConfig: - oauthScopes: - - "https://www.googleapis.com/auth/cloud-platform" - workloadIdentityConfig: - identityNamespace: '{{ required "identityNamespace is required!" .Values.projectId }}.svc.id.goog' - masterAuth: - username: '{{ required "username is required!" .Values.cluster2.masterAuthUser }}' - password: - valueFrom: - secretKeyRef: - name: cluster-2-secret-pwd - key: password - clientCertificateConfig: - issueClientCertificate: false diff --git a/config-connector/solutions/networking/helm/autoneg/clusters/templates/k8s-cluster-1-secret-pwd.yaml b/config-connector/solutions/networking/helm/autoneg/clusters/templates/k8s-cluster-1-secret-pwd.yaml deleted file mode 100644 index aaec97ea8a0..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/clusters/templates/k8s-cluster-1-secret-pwd.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Secret -metadata: - name: cluster-1-secret-pwd -stringData: - password: {{ required "password is required!" .Values.cluster1.masterAuthPwd }} diff --git a/config-connector/solutions/networking/helm/autoneg/clusters/templates/k8s-cluster-2-secret-pwd.yaml b/config-connector/solutions/networking/helm/autoneg/clusters/templates/k8s-cluster-2-secret-pwd.yaml deleted file mode 100644 index 2b998f59bac..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/clusters/templates/k8s-cluster-2-secret-pwd.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Secret -metadata: - name: cluster-2-secret-pwd -stringData: - password: {{ required "password is required!" 
.Values.cluster2.masterAuthPwd }} diff --git a/config-connector/solutions/networking/helm/autoneg/clusters/values.yaml b/config-connector/solutions/networking/helm/autoneg/clusters/values.yaml deleted file mode 100644 index cb708fb7326..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/clusters/values.yaml +++ /dev/null @@ -1,11 +0,0 @@ -projectId: -cluster1: - name: cluster-na - zone: us-central1-b - masterAuthUser: user - masterAuthPwd: password12345678 -cluster2: - name: cluster-eu - zone: europe-west2-a - masterAuthUser: user - masterAuthPwd: password12345678 diff --git a/config-connector/solutions/networking/helm/autoneg/lb/Chart.yaml b/config-connector/solutions/networking/helm/autoneg/lb/Chart.yaml deleted file mode 100644 index 6260cb11e2b..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/Chart.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -appVersion: "1.0" -description: A Helm chart to deploy load balancing resources for GCP autoneg solution -name: gcp-autoneg-lb -version: 0.1.0 -icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c4/Globe_icon.svg/1024px-Globe_icon.svg.png diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-backend-service.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-backend-service.yaml deleted file mode 100644 index 66c8e4b6524..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-backend-service.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeBackendService -metadata: - name: node-app-backend-service -spec: - healthChecks: - - healthCheckRef: - name: node-app-backend-healthcheck - protocol: HTTP - location: global - diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-compute-firewall.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-compute-firewall.yaml deleted file mode 100644 index 594187eccc9..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-compute-firewall.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeFirewall -metadata: - name: fw-allow-autoneg -spec: - allow: - - protocol: tcp - sourceRanges: - - "130.211.0.0/22" - - "35.191.0.0/16" - networkRef: - name: default - diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-compute-forwarding-rule.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-compute-forwarding-rule.yaml deleted file mode 100644 index 3867e4c0a0c..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-compute-forwarding-rule.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeForwardingRule -metadata: - name: node-app-forwarding-rule -spec: - target: - targetHTTPProxyRef: - name: node-app-target-proxy - portRange: "80" - ipProtocol: "TCP" - ipVersion: "IPV4" - location: global - diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-compute-health-check.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-compute-health-check.yaml deleted file mode 100644 index b6a031d8f06..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-compute-health-check.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeHealthCheck -metadata: - name: node-app-backend-healthcheck -spec: - checkIntervalSec: 10 - tcpHealthCheck: - port: 8080 - location: global - diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-custom-role-autoneg.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-custom-role-autoneg.yaml deleted file mode 100644 index e9bf02f8b46..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-custom-role-autoneg.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMCustomRole -metadata: - name: autonegrole -spec: - title: Autoneg role - permissions: - - compute.backendServices.get - - compute.backendServices.update - - compute.networkEndpointGroups.use - - compute.healthChecks.useReadOnly diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-default-network.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-default-network.yaml deleted file mode 100644 index 09621376fe3..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-default-network.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeNetwork -metadata: - name: default - annotations: - cnrm.cloud.google.com/deletion-policy: abandon -spec: - description: Default network for the project - diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-policy-autoneg-custom.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-policy-autoneg-custom.yaml deleted file mode 100644 index f78c941868e..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-policy-autoneg-custom.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: iampolicymember-autoneg-custom -spec: - member: serviceAccount:autoneg-system@{{ required "projectId is required!" .Values.projectId }}.iam.gserviceaccount.com - role: projects/{{ required "projectId is required!" .Values.projectId }}/roles/autonegrole - resourceRef: - apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 - kind: Project - external: projects/{{ required "projectId is required!" 
.Values.projectId }} diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-policy-autoneg-wi.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-policy-autoneg-wi.yaml deleted file mode 100644 index 59f8a1580ce..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-policy-autoneg-wi.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicy -metadata: - name: iampolicy-autoneg-workloadidentity -spec: - resourceRef: - apiVersion: iam.cnrm.cloud.google.com/v1beta1 - kind: IAMServiceAccount - name: autoneg-system - bindings: - - role: roles/iam.workloadIdentityUser - members: - - serviceAccount:{{ required "projectId is required!" .Values.projectId }}.svc.id.goog[autoneg-system/default] - diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-service-account-autoneg.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-service-account-autoneg.yaml deleted file mode 100644 index 1b7511a2c32..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-service-account-autoneg.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMServiceAccount -metadata: - name: autoneg-system -spec: - displayName: Autoneg Service Account diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-target-http-proxy.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-target-http-proxy.yaml deleted file mode 100644 index 58950a7be75..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-target-http-proxy.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeTargetHTTPProxy -metadata: - name: node-app-target-proxy -spec: - description: Proxy for node app - urlMapRef: - name: node-app-url-map - location: global - diff --git a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-url-map.yaml b/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-url-map.yaml deleted file mode 100644 index 6bda422e1e6..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/templates/gcp-url-map.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeURLMap -metadata: - name: node-app-url-map - labels: - retry: again -spec: - defaultService: - backendServiceRef: - name: node-app-backend-service - location: global - diff --git a/config-connector/solutions/networking/helm/autoneg/lb/values.yaml b/config-connector/solutions/networking/helm/autoneg/lb/values.yaml deleted file mode 100644 index 8ecefc53f10..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/lb/values.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -projectId: -region: us-central1 diff --git a/config-connector/solutions/networking/helm/autoneg/workload/Chart.yaml b/config-connector/solutions/networking/helm/autoneg/workload/Chart.yaml deleted file mode 100644 index da5f60a24f6..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/workload/Chart.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -appVersion: "1.0" -description: A Helm chart to deploy example workload configuration for GCP autoneg solution -name: gcp-autoneg-workload -version: 0.1.0 -icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c4/Globe_icon.svg/1024px-Globe_icon.svg.png diff --git a/config-connector/solutions/networking/helm/autoneg/workload/templates/autoneg.yaml b/config-connector/solutions/networking/helm/autoneg/workload/templates/autoneg.yaml deleted file mode 100644 index a609d36df60..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/workload/templates/autoneg.yaml +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: autoneg-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: autoneg-leader-election-role - namespace: autoneg-system -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - update - - patch -- apiGroups: - - "" - resources: - - events - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: autoneg-manager-role -rules: -- apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - services/status - verbs: - - get - - update - - patch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: autoneg-proxy-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: autoneg-leader-election-rolebinding - namespace: autoneg-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: autoneg-leader-election-role -subjects: -- kind: ServiceAccount - name: default - namespace: autoneg-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: autoneg-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: autoneg-manager-role -subjects: -- kind: ServiceAccount - name: default - namespace: autoneg-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: 
autoneg-proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: autoneg-proxy-role -subjects: -- kind: ServiceAccount - name: default - namespace: autoneg-system ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - prometheus.io/port: "8443" - prometheus.io/scheme: https - prometheus.io/scrape: "true" - labels: - control-plane: controller-manager - name: autoneg-controller-manager-metrics-service - namespace: autoneg-system -spec: - ports: - - name: https - port: 8443 - targetPort: https - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: autoneg-controller-manager - namespace: autoneg-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=10 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - - args: - - --metrics-addr=127.0.0.1:8080 - - --enable-leader-election - command: - - /manager - image: gcr.io/soell-labs/gke-autoneg-controller:latest - name: manager - resources: - limits: - cpu: 100m - memory: 30Mi - requests: - cpu: 100m - memory: 20Mi - terminationGracePeriodSeconds: 10 diff --git a/config-connector/solutions/networking/helm/autoneg/workload/templates/deployment.yaml b/config-connector/solutions/networking/helm/autoneg/workload/templates/deployment.yaml deleted file mode 100644 index fe6b942c09f..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/workload/templates/deployment.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the 
License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: node-app-deployment - labels: - app: node-app -spec: - replicas: 3 - selector: - matchLabels: - app: node-app - template: - metadata: - labels: - app: node-app - spec: - containers: - - name: node-app-container - image: bulankou/node-hello-world - env: - - name: HELLO_MESSAGE - value: {{ .Values.localMessage}} - ports: - - containerPort: 8080 - readinessProbe: - httpGet: - path: / - port: 8080 - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 10 - failureThreshold: 10 - successThreshold: 1 - livenessProbe: - httpGet: - path: / - port: 8080 - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 10 - failureThreshold: 20 - successThreshold: 1 diff --git a/config-connector/solutions/networking/helm/autoneg/workload/templates/service.yaml b/config-connector/solutions/networking/helm/autoneg/workload/templates/service.yaml deleted file mode 100644 index cf58828afa5..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/workload/templates/service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: node-app-backend - annotations: - cloud.google.com/neg: '{"exposed_ports": {"80":{}}}' - anthos.cft.dev/autoneg: '{"name":"node-app-backend-service", "max_rate_per_endpoint":100}' -spec: - type: ClusterIP - selector: - app: node-app - ports: - - port: 80 - targetPort: 8080 diff --git a/config-connector/solutions/networking/helm/autoneg/workload/values.yaml b/config-connector/solutions/networking/helm/autoneg/workload/values.yaml deleted file mode 100644 index 63a08b0d606..00000000000 --- a/config-connector/solutions/networking/helm/autoneg/workload/values.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -projectId: -localMessage: diff --git a/config-connector/solutions/projects/helm/owned-project/Chart.yaml b/config-connector/solutions/projects/helm/owned-project/Chart.yaml deleted file mode 100644 index f813dc220ce..00000000000 --- a/config-connector/solutions/projects/helm/owned-project/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -name: owned-project -version: 0.1.0 -description: create a project in a folder binding an IAM member as project owner diff --git a/config-connector/solutions/projects/helm/owned-project/README.md b/config-connector/solutions/projects/helm/owned-project/README.md deleted file mode 100644 index 4474dfeaf46..00000000000 --- a/config-connector/solutions/projects/helm/owned-project/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# Owned Project -================================================== - -## NAME - - owned-project - -## SYNOPSIS - - Config Connector compatible YAML files to create - a project in a folder, binding an IAM member - as project owner. - -## CONSUMPTION - -1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - -1. Go to the owned-project folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/projects/helm/owned-project - ``` - -## REQUIREMENTS - -1. GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). -1. [Helm](../../../README.md#helm) -1. The "cnrm-system" service account assigned with - - `roles/resourcemanager.folderViewer` - - `roles/resourcemanager.projectCreator` - - `roles/iam.securityAdmin` - in the target folder -1. 
The IAM member selected below must meet the requirements specified - [here](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy#top_of_page). - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/projects/helm/owned-project](.)). - -1. Review and update the values in `./values.yaml`. - -1. Validate and install the sample with Helm, with all values replaced with the ones you desire. - - ```bash - # validate your chart - helm lint . --set billingID=BILLING_ID,folderID=FOLDER_ID,iamMember=user:name@example.com,projectID=PROJECT_ID - - # check the output of your chart - helm template . --set billingID=BILLING_ID,folderID=FOLDER_ID,iamMember=user:name@example.com,projectID=PROJECT_ID - - # do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set billingID=BILLING_ID,folderID=FOLDER_ID,iamMember=user:name@example.com,projectID=PROJECT_ID --generate-name - - # install your chart - helm install . --set billingID=BILLING_ID,folderID=FOLDER_ID,iamMember=user:name@example.com,projectID=PROJECT_ID --generate-name - ``` - -1. Check the created helm release to verify the installation: - ```bash - helm list - ``` - - Check the status of the project, where `PROJECT_ID` is the project ID value you gave above: - ```bash - kubectl describe gcpproject PROJECT_ID - ``` - - Check the status of the IAM Policy Member: - ```bash - kubectl describe iampolicymember owned-project-iampolicymember - ``` - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/projects/helm/owned-project/templates/iampolicymember.yaml b/config-connector/solutions/projects/helm/owned-project/templates/iampolicymember.yaml deleted file mode 100644 index 7526a5c0b74..00000000000 --- a/config-connector/solutions/projects/helm/owned-project/templates/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: owned-project-iampolicymember -spec: - member: {{ required "IAM member is required!" .Values.iamMember }} - role: roles/owner - resourceRef: - name: {{ required "Project ID is required!" .Values.projectID }} - kind: Project - apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 diff --git a/config-connector/solutions/projects/helm/owned-project/templates/project.yaml b/config-connector/solutions/projects/helm/owned-project/templates/project.yaml deleted file mode 100644 index a0e179e5685..00000000000 --- a/config-connector/solutions/projects/helm/owned-project/templates/project.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 -kind: Project -metadata: - annotations: - cnrm.cloud.google.com/folder-id: {{ required "Folder ID is required!" .Values.folderID | quote }} - name: {{ required "Project ID is required!" .Values.projectID }} -spec: - name: {{ required "Project ID is required!" .Values.projectID }} - billingAccountRef: - external: {{ required "Billing ID is required!" .Values.billingID | quote }} diff --git a/config-connector/solutions/projects/helm/owned-project/values.yaml b/config-connector/solutions/projects/helm/owned-project/values.yaml deleted file mode 100644 index f592ec03d70..00000000000 --- a/config-connector/solutions/projects/helm/owned-project/values.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -billingID: -folderID: -iamMember: -projectID: diff --git a/config-connector/solutions/projects/helm/project-hierarchy/README.md b/config-connector/solutions/projects/helm/project-hierarchy/README.md deleted file mode 100644 index b6028afcbef..00000000000 --- a/config-connector/solutions/projects/helm/project-hierarchy/README.md +++ /dev/null @@ -1,101 +0,0 @@ -# Project Hierarchy -================================================== - -## NAME - - project-hierarchy - -## SYNOPSIS - - Config Connector compatible YAML files to create - a folder in an organization, and a project - beneath it. - -## CONSUMPTION - -1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - -1. Go to the project-hierarchy folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/projects/helm/project-hierarchy - ``` - -## REQUIREMENTS - -1. GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). -1. [Helm](../../../README.md#helm) -1. The "cnrm-system" service account assigned with - - `roles/resourcemanager.folderCreator` - - `roles/resourcemanager.projectCreator` - in the target organization - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/projects/helm/project-hierarchy](.)). - -1. Review and update the values in `./folder/values.yaml` and `./project/values.yaml`, except folderID, which you will find in a later step. - -1. Install the folder Helm chart: - - ```bash - # validate your chart - helm lint ./folder - - # do a dryrun on your chart and address issues if there are any - helm install ./folder --dry-run --generate-name - - # install your chart - helm install ./folder --generate-name - ``` - -1. 
Check the created helm release to verify the installation: - - ```bash - helm list - ``` - - Check the status of the folder you just created: - ```bash - kubectl describe gcpfolder project-hierarchy-folder - ``` - -1. Find the ID of the created folder. If you replaced the `folderName` value in step 1, replace `project-hierarchy-folder` below with the value you chose: - - ```bash - kubectl describe gcpfolder project-hierarchy-folder | grep Name:\ *folders\/ | sed "s/.*folders\///" - ``` - -1. Update `./project/values.yaml` with this folderID value. - -1. Install the project helm chart: - - ```bash - # validate your chart - helm lint ./project - - # do a dryrun on your chart and address issues if there are any - helm install ./project --dry-run --generate-name - - # install your chart - helm install ./project --generate-name - ``` - -1. Clean up the installations: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete releases specifying release names from the previous command output. - helm delete [release_name] - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/projects/helm/project-hierarchy/folder/Chart.yaml b/config-connector/solutions/projects/helm/project-hierarchy/folder/Chart.yaml deleted file mode 100644 index 1fd83abea0c..00000000000 --- a/config-connector/solutions/projects/helm/project-hierarchy/folder/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -name: new-folder -version: 0.1.0 -description: create a folder in an organization diff --git a/config-connector/solutions/projects/helm/project-hierarchy/folder/templates/folder.yaml b/config-connector/solutions/projects/helm/project-hierarchy/folder/templates/folder.yaml deleted file mode 100644 index f498947892f..00000000000 --- a/config-connector/solutions/projects/helm/project-hierarchy/folder/templates/folder.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 -kind: Folder -metadata: - annotations: - cnrm.cloud.google.com/organization-id: {{ required "Organization ID is required!" .Values.orgID | quote }} - name: {{ .Values.folderName }} -spec: - displayName: {{ .Values.folderName }} diff --git a/config-connector/solutions/projects/helm/project-hierarchy/folder/values.yaml b/config-connector/solutions/projects/helm/project-hierarchy/folder/values.yaml deleted file mode 100644 index 317514ec405..00000000000 --- a/config-connector/solutions/projects/helm/project-hierarchy/folder/values.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -folderName: project-hierarchy-folder -orgID: diff --git a/config-connector/solutions/projects/helm/project-hierarchy/project/Chart.yaml b/config-connector/solutions/projects/helm/project-hierarchy/project/Chart.yaml deleted file mode 100644 index b81033eea68..00000000000 --- a/config-connector/solutions/projects/helm/project-hierarchy/project/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -name: project-hierarchy -version: 0.1.0 -description: create project in a folder diff --git a/config-connector/solutions/projects/helm/project-hierarchy/project/templates/project.yaml b/config-connector/solutions/projects/helm/project-hierarchy/project/templates/project.yaml deleted file mode 100644 index a0e179e5685..00000000000 --- a/config-connector/solutions/projects/helm/project-hierarchy/project/templates/project.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 -kind: Project -metadata: - annotations: - cnrm.cloud.google.com/folder-id: {{ required "Folder ID is required!" .Values.folderID | quote }} - name: {{ required "Project ID is required!" .Values.projectID }} -spec: - name: {{ required "Project ID is required!" .Values.projectID }} - billingAccountRef: - external: {{ required "Billing ID is required!" 
.Values.billingID | quote }} diff --git a/config-connector/solutions/projects/helm/project-hierarchy/project/values.yaml b/config-connector/solutions/projects/helm/project-hierarchy/project/values.yaml deleted file mode 100644 index 0e23b39e89d..00000000000 --- a/config-connector/solutions/projects/helm/project-hierarchy/project/values.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -folderID: -billingID: -projectID: diff --git a/config-connector/solutions/projects/helm/project-services/Chart.yaml b/config-connector/solutions/projects/helm/project-services/Chart.yaml deleted file mode 100644 index e1e57f0b2f8..00000000000 --- a/config-connector/solutions/projects/helm/project-services/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -name: project-services -version: 0.1.0 -description: Enable a service on a desired project diff --git a/config-connector/solutions/projects/helm/project-services/README.md b/config-connector/solutions/projects/helm/project-services/README.md deleted file mode 100644 index f4e58b86905..00000000000 --- a/config-connector/solutions/projects/helm/project-services/README.md +++ /dev/null @@ -1,97 +0,0 @@ -# Project Services - -================================================== - -## NAME - - project-services - -## SYNOPSIS - - Config Connector compatible YAML files to enable services on a desired project. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the project-services folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/projects/helm/project-services - ``` - -## REQUIREMENTS - -1. GKE Cluster with [Config Connector installed using a GKE Workload Identity](https://cloud.google.com/config-connector/docs/how-to/install-upgrade-uninstall#workload-identity). - -1. Cloud Resource Manager API enabled in the project where Config Connector is installed. - -1. The "cnrm-system" service account that has the `roles/serviceusage.serviceUsageAdmin` or `roles/owner` role in your desired project. - -1. [Helm](../../../README.md#helm) - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/projects/helm/project-services](.)). - -1. Review and update the values in `./values.yaml`. - -1. Create a namespace. If you want to use your existing namespace skip this step and use your own namespace name instead of `project-annotated` in all other steps. - - ```bash - kubectl create namespace project-annotated - ``` - -1. Validate and install the sample with Helm. `PROJECT_ID` should be the project ID of the desired project. 
- - ```bash - # validate your chart - helm lint . --set ProjectID=PROJECT_ID --namespace project-annotated - - # check the output of your chart - helm template . --set ProjectID=PROJECT_ID --namespace project-annotated - - # do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set ProjectID=PROJECT_ID --namespace project-annotated --generate-name - - # install your chart - helm install . --set ProjectID=PROJECT_ID --namespace project-annotated --generate-name - ``` - -1. _Optionally_ set `Service.Name` in the same manner. - - ```bash - helm install . --set ProjectID=PROJECT_ID,Service.Name=compute.googleapis.com - ``` - - The package-default value will enable [Firebase](https://firebase.google.com/docs). - -1. Check the created helm release to verify the installation: - - ```bash - helm list - ``` - - Check the status of the applied YAML by specifying namespace: - - ```bash - kubectl describe gcpservice --namespace project-annotated - ``` - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/projects/helm/project-services/templates/service.yaml b/config-connector/solutions/projects/helm/project-services/templates/service.yaml deleted file mode 100644 index fa4fe3bc26c..00000000000 --- a/config-connector/solutions/projects/helm/project-services/templates/service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1 -kind: Service -metadata: - annotations: - cnrm.cloud.google.com/project-id: {{ .Values.ProjectID }} - name: {{ .Values.Service.Name }} diff --git a/config-connector/solutions/projects/helm/project-services/values.yaml b/config-connector/solutions/projects/helm/project-services/values.yaml deleted file mode 100644 index f023dae8ff4..00000000000 --- a/config-connector/solutions/projects/helm/project-services/values.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -ProjectID: - -Service: - Name: firebasehosting.googleapis.com diff --git a/config-connector/solutions/projects/helm/simple-project/Chart.yaml b/config-connector/solutions/projects/helm/simple-project/Chart.yaml deleted file mode 100644 index 14105799c48..00000000000 --- a/config-connector/solutions/projects/helm/simple-project/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -name: simple-project -version: 0.1.0 -description: Create a project in an organization diff --git a/config-connector/solutions/projects/helm/simple-project/README.md b/config-connector/solutions/projects/helm/simple-project/README.md deleted file mode 100644 index 3b0d6a56b5b..00000000000 --- a/config-connector/solutions/projects/helm/simple-project/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# Simple Project - -================================================== - -## NAME - - simple-project - -## SYNOPSIS - - Config Connector compatible YAML files to create a project in an organization. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the simple-project folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/projects/helm/simple-project - ``` - -## REQUIREMENTS - -1. 
GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster). - -1. A working Config Connector cluster using the "cnrm-system" service account with _minimally_ the permissions given by the following role in the desired organization: - - `roles/resourcemanager.projectCreator` - -1. [Helm](../../../README.md#helm) - -## USAGE - -All steps are run from the current directory ([config-connector/solutions/projects/helm/simple-project](.)). - -1. Review and update the values in `./values.yaml`. - -1. Validate and install the sample with Helm. `ORG_ID` should be your organization ID, `PROJECT_ID` should be a new project ID unique within GCS, and `BILLING_ID` should be your desired billing ID for the new project. - - ```bash - # validate your chart - helm lint . --set billingID=BILLING_ID,orgID=ORG_ID,projectID=PROJECT_ID - - # check the output of your chart - helm template . --set billingID=BILLING_ID,orgID=ORG_ID,projectID=PROJECT_ID - - # do a dryrun on your chart and address issues if there are any - helm install . --dry-run --set billingID=BILLING_ID,orgID=ORG_ID,projectID=PROJECT_ID --generate-name - - # install your chart - helm install . --set billingID=BILLING_ID,orgID=ORG_ID,projectID=PROJECT_ID --generate-name - ``` - -1. Check the created helm release to verify the installation: - ```bash - helm list - ``` - - Check the status of the applied YAML: - ```bash - kubectl describe gcpprojects PROJECT_ID - ``` - where `PROJECT_ID` is your project ID above. - -1. Clean up the installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/projects/helm/simple-project/templates/project.yaml b/config-connector/solutions/projects/helm/simple-project/templates/project.yaml deleted file mode 100644 index 6437f34dda6..00000000000 --- a/config-connector/solutions/projects/helm/simple-project/templates/project.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 -kind: Project -metadata: - annotations: - cnrm.cloud.google.com/organization-id: {{ required "Organization ID is required!" .Values.orgID | quote }} - name: {{ required "Project ID is required!" .Values.projectID }} -spec: - name: {{ required "Project ID is required!" .Values.projectID }} - billingAccountRef: - external: {{ required "Billing ID is required!" .Values.billingID | quote}} diff --git a/config-connector/solutions/projects/helm/simple-project/values.yaml b/config-connector/solutions/projects/helm/simple-project/values.yaml deleted file mode 100644 index b185db3a7ea..00000000000 --- a/config-connector/solutions/projects/helm/simple-project/values.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -billingID: -orgID: -projectID: diff --git a/config-connector/solutions/projects/kpt/owned-project/Kptfile b/config-connector/solutions/projects/kpt/owned-project/Kptfile deleted file mode 100644 index ceb31a522f6..00000000000 --- a/config-connector/solutions/projects/kpt/owned-project/Kptfile +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: owned-project -packageMetadata: - shortDescription: Create a project under a folder and grant the owner role for the - project -openAPI: - definitions: - io.k8s.cli.setters.project-id: - description: ID of project - x-k8s-cli: - setter: - name: project-id - value: ${PROJECT_ID?} - setBy: PLACEHOLDER - io.k8s.cli.setters.billing-account: - description: ID of project billing account - x-k8s-cli: - setter: - name: billing-account - value: ${BILLING_ACCOUNT_ID?} - setBy: PLACEHOLDER - io.k8s.cli.setters.folder-id: - description: numeric GCP ID of folder - x-k8s-cli: - setter: - name: folder-id - value: "${FOLDER_ID?}" - setBy: PLACEHOLDER - io.k8s.cli.setters.iam-member: - description: cloud identity name to give project owner permission to - x-k8s-cli: - setter: - name: iam-member - value: ${IAM_MEMBER?} - setBy: PLACEHOLDER diff --git a/config-connector/solutions/projects/kpt/owned-project/README.md b/config-connector/solutions/projects/kpt/owned-project/README.md deleted file mode 100644 index 88122827e94..00000000000 --- a/config-connector/solutions/projects/kpt/owned-project/README.md +++ /dev/null @@ -1,56 +0,0 @@ -Owned Project 
-================================================== - -# NAME - - owned-project - -# SYNOPSIS - - Config Connector compatible YAML files to create - a project in a folder, binding an IAM member - as project owner. - -# CONSUMPTION - - Fetch the package using [kpt](https://googlecontainertools.github.io/kpt/). - - `kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/projects/kpt/owned-project owned-project` - -# REQUIREMENTS - - - A working Config Connector cluster using the cnrm-system service account - with the following roles in the target folder: - - `roles/resourcemanager.folderViewer` - - `roles/resourcemanager.projectCreator` - - `roles/iam.securityAdmin` - - The IAM member meets the requirements specified - [here](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy#top_of_page). - -# USAGE - - Replace the - `${BILLING_ACCOUNT_ID?}` value. - From within this directory, run - ``` - kpt cfg set . billing-account VALUE - ``` - replacing `VALUE` with your billing account - ID. - - Replace the `${FOLDER_ID?}`, `${IAM_MEMBER?}`, and `${PROJECT_ID?}` values the same way, using: - ``` - kpt cfg set . folder-id VALUE - kpt cfg set . iam-member VALUE - kpt cfg set . project-id VALUE - ``` - where the folder-id `VALUE` is the numeric folder ID of the folder to create the new project under, the iam-member `VALUE` is the fully qualified IAM name of target member, e.g. "user:me@example.com", and the project-id `VALUE` is the globally unique name you want your project to have. - - Now you can fully apply this solution. - ``` - kubectl apply -f . - ``` - -# LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/projects/kpt/owned-project/iampolicymember.yaml b/config-connector/solutions/projects/kpt/owned-project/iampolicymember.yaml deleted file mode 100644 index 4862d3eb44a..00000000000 --- a/config-connector/solutions/projects/kpt/owned-project/iampolicymember.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: iam.cnrm.cloud.google.com/v1beta1 -kind: IAMPolicyMember -metadata: - name: owned-project-iampolicymember -spec: - member: "${IAM_MEMBER?}" # {"$ref":"#/definitions/io.k8s.cli.setters.iam-member"} - role: roles/owner - resourceRef: - name: ${PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.project-id"} - kind: Project - apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 diff --git a/config-connector/solutions/projects/kpt/owned-project/project.yaml b/config-connector/solutions/projects/kpt/owned-project/project.yaml deleted file mode 100644 index a09c603f8a6..00000000000 --- a/config-connector/solutions/projects/kpt/owned-project/project.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 -kind: Project -metadata: - annotations: - # Set this field after finding the folder number. - cnrm.cloud.google.com/folder-id: "${FOLDER_ID?}" # {"$ref":"#/definitions/io.k8s.cli.setters.folder-id"} - name: ${PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.project-id"} -spec: - name: ${PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.project-id"} - billingAccountRef: - # Replace "${BILLING_ACCOUNT_ID?}" with the numeric ID for your billing account - external: "${BILLING_ACCOUNT_ID?}" # {"$ref":"#/definitions/io.k8s.cli.setters.billing-account"} diff --git a/config-connector/solutions/projects/kpt/project-hierarchy/Kptfile b/config-connector/solutions/projects/kpt/project-hierarchy/Kptfile deleted file mode 100644 index 3906abbbd44..00000000000 --- a/config-connector/solutions/projects/kpt/project-hierarchy/Kptfile +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: project-hierarchy -packageMetadata: - shortDescription: Deploy a project under a folder within an organization -openAPI: - definitions: - io.k8s.cli.setters.org-id: - description: organization ID for associated services - x-k8s-cli: - setter: - name: org-id - value: "${ORG_ID}" - setBy: PLACEHOLDER - io.k8s.cli.setters.project-id: - description: ID of project - x-k8s-cli: - setter: - name: project-id - value: project-hierarchy-project - setBy: package-default - io.k8s.cli.setters.folder-name: - description: name of folder - x-k8s-cli: - setter: - name: folder-name - value: 
project-hierarchy-folder - setBy: package-default - io.k8s.cli.setters.billing-account: - description: ID of billing account - x-k8s-cli: - setter: - name: billing-account - value: ${BILLING_ACCOUNT_ID?} - setBy: PLACEHOLDER - io.k8s.cli.setters.folder-number: - description: numeric GCP ID of folder - x-k8s-cli: - setter: - name: folder-number - value: "${FOLDER_NUMBER?}" - setBy: PLACEHOLDER diff --git a/config-connector/solutions/projects/kpt/project-hierarchy/README.md b/config-connector/solutions/projects/kpt/project-hierarchy/README.md deleted file mode 100644 index 875ed1d5125..00000000000 --- a/config-connector/solutions/projects/kpt/project-hierarchy/README.md +++ /dev/null @@ -1,90 +0,0 @@ -Project Hierarchy -================================================== - -# NAME - - project-hierarchy - -# SYNOPSIS - - Config Connector compatible YAML files to create - a folder in an organization, and a project - beneath it. - -# CONSUMPTION - - Using [kpt](https://googlecontainertools.github.io/kpt/): - - Run `kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/projects/kpt/project-hierarchy project-hierarchy`. - -# REQUIREMENTS - - A working Config Connector cluster using a - service account with the following roles in - the organization: - - `roles/resourcemanager.folderCreator` - - `roles/resourcemanager.projectCreator` - -# USAGE - - Replace the - `${BILLING_ACCOUNT_ID?}` and `${ORG_ID?}` values: - - From within this directory, run - ``` - kpt cfg set . billing-account VALUE - ``` - and - ``` - kpt cfg set . org-id VALUE - ``` - replacing `VALUE` with your billing account - and organization ID respectively. - - You will also need to reset the project ID, - since a project with the given ID already exists. - ``` - kpt cfg set . project-id VALUE - ``` - - - Currently, to create a project under a - folder, you must supply a numeric folder ID, - which is only available after the folder is - created. 
An issue outlining this shortfall in - Config Connector functionality is filed on the - project's GitHub, - https://github.com/GoogleCloudPlatform/k8s-config-connector/issues/104. - - - To be nested beneath it, the project still needs - a folder number. This can only be found after - creating the folder. You can do so with - ``` - kubectl apply -f folder.yaml - ``` - - Wait for GCP to generate the folder. - ``` - kubectl wait --for=condition=Ready -f folder.yaml - ``` - - Now extract the folder number. - ``` - FOLDER_NUMBER=$(kubectl describe -f folder.yaml | grep Name:\ *folders\/ | sed "s/.*folders\///") - ``` - You can set the folder number using the - following command: - ``` - kpt cfg set . folder-number $FOLDER_NUMBER --set-by "README-instructions" - ``` - - - Now you can fully apply this solution. - ``` - kubectl apply -f . - ``` - -# LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/projects/kpt/project-hierarchy/folder.yaml b/config-connector/solutions/projects/kpt/project-hierarchy/folder.yaml deleted file mode 100644 index e5cf7938a0a..00000000000 --- a/config-connector/solutions/projects/kpt/project-hierarchy/folder.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 -kind: Folder -metadata: - annotations: - # Replace "${ORG_ID?}" with the numeric ID for your organization - cnrm.cloud.google.com/organization-id: "${ORG_ID}" # {"$ref":"#/definitions/io.k8s.cli.setters.org-id"} - name: project-hierarchy-folder # {"$ref":"#/definitions/io.k8s.cli.setters.folder-name"} -spec: - displayName: project-hierarchy-folder # {"$ref":"#/definitions/io.k8s.cli.setters.folder-name"} diff --git a/config-connector/solutions/projects/kpt/project-hierarchy/project.yaml b/config-connector/solutions/projects/kpt/project-hierarchy/project.yaml deleted file mode 100644 index d209efbdf70..00000000000 --- a/config-connector/solutions/projects/kpt/project-hierarchy/project.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 -kind: Project -metadata: - annotations: - # Set this field after finding the folder number. 
- cnrm.cloud.google.com/folder-id: "${FOLDER_NUMBER?}" # {"$ref":"#/definitions/io.k8s.cli.setters.folder-number"} - name: project-hierarchy-project # {"$ref":"#/definitions/io.k8s.cli.setters.project-id"} -spec: - name: project-hierarchy-project # {"$ref":"#/definitions/io.k8s.cli.setters.project-id"} - billingAccountRef: - # Replace "${BILLING_ACCOUNT_ID?}" with the numeric ID for your billing account - external: "${BILLING_ACCOUNT_ID?}" # {"$ref":"#/definitions/io.k8s.cli.setters.billing-account"} diff --git a/config-connector/solutions/projects/kpt/project-services/0-namespace.yaml b/config-connector/solutions/projects/kpt/project-services/0-namespace.yaml deleted file mode 100644 index 1837bc93ee8..00000000000 --- a/config-connector/solutions/projects/kpt/project-services/0-namespace.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Namespace -metadata: - annotations: - # Replace the ${PROJECT_ID?} below with your desired project ID. 
- cnrm.cloud.google.com/project-id: ${PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.project-id"} - name: project-annotated diff --git a/config-connector/solutions/projects/kpt/project-services/Kptfile b/config-connector/solutions/projects/kpt/project-services/Kptfile deleted file mode 100644 index 14a9b535124..00000000000 --- a/config-connector/solutions/projects/kpt/project-services/Kptfile +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: project-services -packageMetadata: - shortDescription: enables a service on a project -openAPI: - definitions: - io.k8s.cli.setters.service-name: - description: API service name - x-k8s-cli: - setter: - name: service-name - value: firebasehosting.googleapis.com - setBy: package-default - io.k8s.cli.setters.project-id: - description: ID of project - x-k8s-cli: - setter: - name: project-id - value: ${PROJECT_ID?} - setBy: PLACEHOLDER diff --git a/config-connector/solutions/projects/kpt/project-services/README.md b/config-connector/solutions/projects/kpt/project-services/README.md deleted file mode 100644 index b1d0b273268..00000000000 --- a/config-connector/solutions/projects/kpt/project-services/README.md +++ /dev/null @@ -1,42 +0,0 @@ -Project Services -================================================== -# NAME - project-services -# SYNOPSIS - Config Connector compatible YAML files to enable services on a project. -# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/). - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/projects/kpt/project-services project-services - ``` -# REQUIREMENTS - A working cluster with Config Connector installed. - - The "cnrm-system" service account must have -`roles/serviceusage.serviceUsageAdmin` or `roles/owner` for the desired project. 
-# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|--------------|--------------------------------|-----------------|------------------|-------| -| project-id | ${PROJECT_ID?} | PLACEHOLDER | ID of project | 1 | -| service-name | firebasehosting.googleapis.com | package-default | API service name | 1 | -# USAGE - - Set project-id to the ID of the project to enable services for. - ``` - kpt cfg set . project-id your-project-id - ``` - _Optionally_, change the service name before applying the service. For example, to enable -[Compute Engine](https://cloud.google.com/compute/docs): - ``` - kpt cfg set . service-name compute.googleapis.com - ``` - The package-default value will enable -[Firebase](https://firebase.google.com/docs). - - Once your configuration is complete, simply apply: - ``` - kubectl apply -f . - ``` - Note: services that have been applied will have type `gcpservice` and be in the `project-annotated` namespace. -# LICENSE - Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/projects/kpt/project-services/service.yaml b/config-connector/solutions/projects/kpt/project-services/service.yaml deleted file mode 100644 index eb8b9356a82..00000000000 --- a/config-connector/solutions/projects/kpt/project-services/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1 -kind: Service -metadata: - namespace: project-annotated - name: firebasehosting.googleapis.com # {"$ref":"#/definitions/io.k8s.cli.setters.service-name"} diff --git a/config-connector/solutions/projects/kpt/shared-vpc/0-namespace.yaml b/config-connector/solutions/projects/kpt/shared-vpc/0-namespace.yaml deleted file mode 100644 index 96607be6e34..00000000000 --- a/config-connector/solutions/projects/kpt/shared-vpc/0-namespace.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Namespace -metadata: - annotations: - # Replace the ${HOST_PROJECT_ID?} below with your desired project ID for VPC host. - cnrm.cloud.google.com/project-id: ${HOST_PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.host-project"} - name: shared-vpc-host-project-annotated ---- -apiVersion: v1 -kind: Namespace -metadata: - annotations: - # Replace the ${SERVICE_PROJECT_ID?} below with your desired project ID for VPC service. 
- cnrm.cloud.google.com/project-id: ${SERVICE_PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.service-project"} - name: shared-vpc-service-project-annotated diff --git a/config-connector/solutions/projects/kpt/shared-vpc/Kptfile b/config-connector/solutions/projects/kpt/shared-vpc/Kptfile deleted file mode 100644 index af73fe25688..00000000000 --- a/config-connector/solutions/projects/kpt/shared-vpc/Kptfile +++ /dev/null @@ -1,51 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: shared-vpc -packageMetadata: - shortDescription: create a VPC network inside a host project to be consumed from - within a service project -openAPI: - definitions: - io.k8s.cli.setters.host-project: - description: ID of host project - x-k8s-cli: - setter: - name: host-project - value: ${HOST_PROJECT_ID?} - setBy: PLACEHOLDER - io.k8s.cli.setters.service-project: - description: ID of project using network - x-k8s-cli: - setter: - name: service-project - value: ${SERVICE_PROJECT_ID?} - setBy: PLACEHOLDER - io.k8s.cli.setters.billing-account: - description: ID of billing account - x-k8s-cli: - setter: - name: billing-account - value: "${BILLING_ACCOUNT_ID?}" - setBy: PLACEHOLDER - io.k8s.cli.setters.network-name: - description: name of VPC network - x-k8s-cli: - setter: - name: network-name - value: sharedvpcnetwork - setBy: package-default - io.k8s.cli.setters.org-id: - description: ID of organization - x-k8s-cli: - setter: - name: org-id - value: "${ORG_ID?}" - setBy: PLACEHOLDER - io.k8s.cli.setters.default-namespace: - description: namespace where solution will be applied - x-k8s-cli: - setter: - name: default-namespace - value: ${DEFAULT_NAMESPACE?} - setBy: PLACEHOLDER diff --git a/config-connector/solutions/projects/kpt/shared-vpc/README.md b/config-connector/solutions/projects/kpt/shared-vpc/README.md deleted file mode 100644 index 7ba9f6adcd1..00000000000 --- a/config-connector/solutions/projects/kpt/shared-vpc/README.md +++ /dev/null @@ -1,82 +0,0 @@ 
-Shared VPC Network -================================================== - -# NAME - - shared-vpc - -# SYNOPSIS - - Config Connector YAML files to create a VPC network inside a - host project to be consumed from within a service project. - -## Consumption - - Download the package using [kpt](https://googlecontainertools.github.io/kpt/): - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/projects/kpt/shared-vpc . - ``` - -## Requirements - - A working cluster with Config Connector installed. - - The "cnrm-system" service account, which must have: - - `roles/resourcemanager.projectCreator` in the target organization if service and -host projects do not yet exist, or the `owner` role in the projects if they already exist. - - `roles/compute.xpnAdmin` in the target organization - - `roles/billing.user` in the target billing account - - Cloud Billing and Cloud Resource Manager APIs enabled in the project managed by Config Connector - -## Usage - Set the ID for billing account, host project, service project, and organization: - ``` - kpt cfg set . billing-account VALUE - kpt cfg set . host-project VALUE - kpt cfg set . service-project VALUE - kpt cfg set . org-id VALUE - ``` - Set the default namespace setter to reflect the namespace you will apply the solution YAMLs to. This may be the namespace you set [here](https://cloud.google.com/config-connector/docs/how-to/setting-default-namespace). - ``` - kpt cfg set . default-namespace VALUE - ``` - where `VALUE` is the name of the namespace you found to be applicable above. - - _Optionally,_ you can also change the name of the VPC network, from the default value of `sharedvpcnetwork`. - - Once your configuration is complete, simply apply: - ``` - kubectl apply -f . - ``` - - You can check the applied resources by running the following command: - ``` - kubectl get -f . 
- ```
-
- **Note:** To see the applied resources for a given namespace, run
- `kubectl get gcp --namespace `, where `` is replaced by
- the corresponding namespace in the `0-namespace.yaml` file. You'll need to use
- type `gcpservice` to check the status of Service resources defined in
- `service.yaml`.
-
- If you want to clean up the resources, run:
- ```
- kubectl delete -f .
- ```
-
- **Note:** If `computesharedvpchostproject` can't be deleted with
- the error message `Cannot disable project as a shared VPC host because it has
- active service projects.` but `computesharedvpcserviceproject` is already
- deleted, you'll need to [manually detach](
- https://cloud.google.com/vpc/docs/deprovisioning-shared-vpc#detach_service_projects)
- the service project (specified using kpt setter `service-project`) from the
- host project (specified using kpt setter `host-project`). More details about
- the root cause can be found in [this GitHub issue](
- https://github.com/GoogleCloudPlatform/k8s-config-connector/issues/167).
-
-
-# License
-
- Apache 2.0 - See [LICENSE](/LICENSE) for more information.
-
diff --git a/config-connector/solutions/projects/kpt/shared-vpc/computesharedvpchostproject.yaml b/config-connector/solutions/projects/kpt/shared-vpc/computesharedvpchostproject.yaml
deleted file mode 100644
index d34b91fbceb..00000000000
--- a/config-connector/solutions/projects/kpt/shared-vpc/computesharedvpchostproject.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeSharedVPCHostProject -metadata: - namespace: shared-vpc-host-project-annotated - name: computesharedvpchostproject - # This resource will enable the project this namespace is bound to as a Shared - # VPC host. You should only create one of these resources per project. If you - # have multiple namespaces mapping to the same project, ensure that only one - # ComputeSharedVPCHostProject resource exists across these namespaces. diff --git a/config-connector/solutions/projects/kpt/shared-vpc/computesharedvpcserviceproject.yaml b/config-connector/solutions/projects/kpt/shared-vpc/computesharedvpcserviceproject.yaml deleted file mode 100644 index ff93e822b26..00000000000 --- a/config-connector/solutions/projects/kpt/shared-vpc/computesharedvpcserviceproject.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeSharedVPCServiceProject -metadata: - namespace: shared-vpc-host-project-annotated - name: computesharedvpcserviceproject -spec: - projectRef: - # Replace the ${DEFAULT_NAMESPACE?} below with your default namespace. 
- namespace: ${DEFAULT_NAMESPACE?} # {"$ref":"#/definitions/io.k8s.cli.setters.default-namespace"}
- # Replace the ${SERVICE_PROJECT_ID?} below with your desired project ID for VPC service.
- name: ${SERVICE_PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.service-project"}
diff --git a/config-connector/solutions/projects/kpt/shared-vpc/network.yaml b/config-connector/solutions/projects/kpt/shared-vpc/network.yaml
deleted file mode 100644
index a7bcf2236cd..00000000000
--- a/config-connector/solutions/projects/kpt/shared-vpc/network.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: compute.cnrm.cloud.google.com/v1beta1
-kind: ComputeNetwork
-metadata:
- namespace: shared-vpc-host-project-annotated
- name: sharedvpcnetwork # {"$ref":"#/definitions/io.k8s.cli.setters.network-name"}
-spec:
- routingMode: GLOBAL
- autoCreateSubnetworks: true
diff --git a/config-connector/solutions/projects/kpt/shared-vpc/project.yaml b/config-connector/solutions/projects/kpt/shared-vpc/project.yaml
deleted file mode 100644
index aa4f292aa95..00000000000
--- a/config-connector/solutions/projects/kpt/shared-vpc/project.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 -kind: Project -metadata: - annotations: - cnrm.cloud.google.com/organization-id: "${ORG_ID?}" # {"$ref":"#/definitions/io.k8s.cli.setters.org-id"} - name: ${HOST_PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.host-project"} -spec: - # This name can be changed to something more human-readable if desired. - name: ${HOST_PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.host-project"} - billingAccountRef: - # Replace "${BILLING_ACCOUNT_ID?}" with the numeric ID for your billing account - external: "${BILLING_ACCOUNT_ID?}" # {"$ref":"#/definitions/io.k8s.cli.setters.billing-account"} ---- -apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 -kind: Project -metadata: - annotations: - cnrm.cloud.google.com/organization-id: "${ORG_ID?}" # {"$ref":"#/definitions/io.k8s.cli.setters.org-id"} - name: ${SERVICE_PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.service-project"} -spec: - # This name can be changed to something more human-readable if desired. 
- name: ${SERVICE_PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.service-project"} - billingAccountRef: - # Replace "${BILLING_ACCOUNT_ID?}" with the numeric ID for your billing account - external: "${BILLING_ACCOUNT_ID?}" # {"$ref":"#/definitions/io.k8s.cli.setters.billing-account"} diff --git a/config-connector/solutions/projects/kpt/shared-vpc/service.yaml b/config-connector/solutions/projects/kpt/shared-vpc/service.yaml deleted file mode 100644 index 06969a1e416..00000000000 --- a/config-connector/solutions/projects/kpt/shared-vpc/service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1 -kind: Service -metadata: - namespace: shared-vpc-host-project-annotated - name: compute.googleapis.com - annotations: - cnrm.cloud.google.com/deletion-policy: abandon ---- -apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1 -kind: Service -metadata: - namespace: shared-vpc-service-project-annotated - name: compute.googleapis.com - annotations: - cnrm.cloud.google.com/deletion-policy: abandon diff --git a/config-connector/solutions/projects/kpt/simple-project/Kptfile b/config-connector/solutions/projects/kpt/simple-project/Kptfile deleted file mode 100644 index dda20e75b41..00000000000 --- a/config-connector/solutions/projects/kpt/simple-project/Kptfile +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: simple-project -packageMetadata: - shortDescription: A simple project -openAPI: - definitions: - io.k8s.cli.setters.org-id: - description: organization ID for associated services - x-k8s-cli: - setter: - name: org-id - value: "${ORG_ID?}" - setBy: PLACEHOLDER - io.k8s.cli.setters.project-id: - description: ID of project - x-k8s-cli: - setter: - name: project-id - value: ${PROJECT_ID?} - setBy: PLACEHOLDER - io.k8s.cli.setters.billing-account: - description: ID of billing account - x-k8s-cli: - setter: - name: billing-account - value: "${BILLING_ACCOUNT_ID?}" - setBy: PLACEHOLDER diff --git a/config-connector/solutions/projects/kpt/simple-project/README.md b/config-connector/solutions/projects/kpt/simple-project/README.md deleted file mode 100644 index 58a626933b8..00000000000 --- a/config-connector/solutions/projects/kpt/simple-project/README.md +++ /dev/null @@ -1,49 +0,0 @@ -Simple Project -================================================== - -# NAME - - simple-project - -# SYNOPSIS - - Config Connector compatible YAML files to create a project in an organization. 
- -# CONSUMPTION - Using kpt: - ``` - BASE=https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - kpt pkg get $BASE/config-connector/solutions/projects/kpt/simple-project simple-project - ``` - -# REQUIREMENTS - A working cluster with Config Connector installed. - - Cloud Resource Manager API and Cloud Billing API enabled in the project where Config Connector is installed. - - The "cnrm-system" service account must have `roles/resourcemanager.projectCreator` in your organization and `roles/billing.user` for your billing account. - -# USAGE - In order to use, replace the `${PROJECT_ID?}`, `${BILLING_ACCOUNT_ID?}` and - `${ORG_ID?}` values with a unique new project ID, your billing account and - your organization id. You can do this with kpt setters: - ``` - kpt cfg set . project-id VALUE - kpt cfg set . billing-account VALUE - kpt cfg set . org-id VALUE - ``` - - Note: Updating the project-id will set both the project's ID and name to the - same value, if you want a different value for project name, edit - `project.yaml` and replace spec.name with your preferred project name. - - Once your information is in the configs, simply apply. - - ``` - kubectl apply -f . - ``` - -# License - - Apache 2.0 - See [LICENSE](/LICENSE) for more information. - diff --git a/config-connector/solutions/projects/kpt/simple-project/project.yaml b/config-connector/solutions/projects/kpt/simple-project/project.yaml deleted file mode 100644 index 9e4048806ff..00000000000 --- a/config-connector/solutions/projects/kpt/simple-project/project.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 -kind: Project -metadata: - annotations: - cnrm.cloud.google.com/organization-id: "${ORG_ID?}" # {"$ref":"#/definitions/io.k8s.cli.setters.org-id"} - name: ${PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.project-id"} -spec: - # This name can be changed to something more human-readable if desired. - name: ${PROJECT_ID?} # {"$ref":"#/definitions/io.k8s.cli.setters.project-id"} - billingAccountRef: - # Replace "${BILLING_ACCOUNT_ID?}" with the numeric ID for your billing account - external: "${BILLING_ACCOUNT_ID?}" # {"$ref":"#/definitions/io.k8s.cli.setters.billing-account"} diff --git a/config-connector/solutions/sql/helm/mysql-private/README.md b/config-connector/solutions/sql/helm/mysql-private/README.md deleted file mode 100644 index 1803e6a81fa..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/README.md +++ /dev/null @@ -1,113 +0,0 @@ -# MySQL Private - -================================================== - -## NAME - - mysql-private - -## SYNOPSIS - - Config Connector compatible YAML files for creating a MySQL instance on a private network. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the service account folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/sql/helm/mysql-private - ``` - -## REQUIREMENTS - -1. 
GKE Cluster with Config Connector and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_workload_identity_on_a_new_cluster).
-1. [Helm](../../../README.md#helm)
-1. A working Config Connector instance.
-1. The following APIs enabled in the project where Config Connector is installed:
- - Cloud SQL Admin API
- - Compute Engine API
-1. The following APIs enabled in the project managed by Config Connector:
- - Cloud SQL Admin API
- - Compute Engine API
- - Service Networking API
- - Cloud Resource Manager API
-1. The "cnrm-system" service account with either both `roles/cloudsql.admin` and
- `roles/compute.networkAdmin` roles or `roles/owner` in the project managed by Config Connector.
-
-## USAGE
-
-All steps are run from the current directory ([config-connector/solutions/sql/helm/mysql-private](.)).
-
-1. Review and update the values in `./sql/values.yaml`.
-
-1. install and check the private network with Helm.
-
- Due to the bug in Config Connector ([more details](https://github.com/GoogleCloudPlatform/k8s-config-connector/issues/148)), the following resources must be in a ready state before the SQLInstance YAML is applied:
- - `ComputeNetwork`
- - `ComputeAddress`
- - `ServiceNetworkingConnection`
-
- ```bash
- # Do a dryrun on your chart and address issues if there are any
- helm install ./network --dry-run --generate-name
-
- # install network chart
- helm install ./network
-
- # Make sure the Status of ComputeNetwork,ComputeAddress,ServiceNetworkingConnection is Ready
- kubectl describe ComputeNetwork,ComputeAddress,ServiceNetworkingConnection
- ```
-
-1. install and check the MySQL instance on private network with Helm.
- - ```bash - # validate your chart - helm lint ./sql --set SQLUser.Name=username,SQLUser.Password=$(echo -n 'your-password' | base64) - - # check the output of your chart - helm template ./sql --set SQLUser.Name=username,SQLUser.Password=$(echo -n 'your-password' | base64) - - # Do a dryrun on your chart and address issues if there are any - helm install ./sql --dry-run --set SQLUser.Name=username,SQLUser.Password=$(echo -n 'your-password' | base64) --generate-name - - # install your chart - helm install ./sql --set SQLUser.Name=username,SQLUser.Password=$(echo -n 'your-password' | base64) --generate-name - ``` - -1. _Optionally_ set `Database.Name`, `MySQLInstance.Name`, and `MySQLInstance.Region` in the same -manner. Note that if your instance is deleted the name you used will be -reserved for 7 days. You will need to use a new name in order to re-create the -instance: - ```bash - # install your chart with custom changes - helm install ./sql --set SQLUser.Name=username,SQLUser.Password=$(echo -n 'your-password' | base64),Database.Name=mysql-private-databasename,MySQLInstance.Name=mysql-private-instancename,MySQLInstance.Region=us-west1 --generate-name - ``` - -1. Check the created helm release to verify the installation: - ```bash - helm list - ``` - Check the status of the sqlinstances,sqldatabase,sqlusers: - ```bash - kubectl describe sqlinstances,sqldatabase,sqlusers - ``` - -1. Clean up both installation: - - ```bash - # list Helm releases to obtain release name - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/sql/helm/mysql-private/network/Chart.yaml b/config-connector/solutions/sql/helm/mysql-private/network/Chart.yaml deleted file mode 100644 index 648d6192c1c..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/network/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -name: private-network-for-mysql -version: 0.1.0 -description: Creating a private network for MySQL instance diff --git a/config-connector/solutions/sql/helm/mysql-private/network/templates/computeaddress.yaml b/config-connector/solutions/sql/helm/mysql-private/network/templates/computeaddress.yaml deleted file mode 100644 index fa32cf3bc21..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/network/templates/computeaddress.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeAddress -metadata: - name: mysql-private-address -spec: - addressType: INTERNAL - location: global - purpose: VPC_PEERING - prefixLength: 16 - networkRef: - name: mysql-private-network diff --git a/config-connector/solutions/sql/helm/mysql-private/network/templates/computenetwork.yaml b/config-connector/solutions/sql/helm/mysql-private/network/templates/computenetwork.yaml deleted file mode 100644 index 7e40709dbf5..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/network/templates/computenetwork.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeNetwork -metadata: - name: mysql-private-network diff --git a/config-connector/solutions/sql/helm/mysql-private/network/templates/servicenetworkingconnection.yaml b/config-connector/solutions/sql/helm/mysql-private/network/templates/servicenetworkingconnection.yaml deleted file mode 100644 index 6c0d87f4a7f..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/network/templates/servicenetworkingconnection.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 -kind: ServiceNetworkingConnection -metadata: - name: mysql-private-connection -spec: - networkRef: - name: mysql-private-network - reservedPeeringRanges: - - name: mysql-private-address - service: servicenetworking.googleapis.com diff --git a/config-connector/solutions/sql/helm/mysql-private/sql/Chart.yaml b/config-connector/solutions/sql/helm/mysql-private/sql/Chart.yaml deleted file mode 100644 index 689c212279d..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/sql/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -name: mysql-private -version: 0.1.0 -description: Creating a MySQL instance on a private network diff --git a/config-connector/solutions/sql/helm/mysql-private/sql/templates/secret.yaml b/config-connector/solutions/sql/helm/mysql-private/sql/templates/secret.yaml deleted file mode 100644 index 22e152fbe7c..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/sql/templates/secret.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Secret -metadata: - name: mysql-private-secret -data: - password: {{ required "Password is required" .Values.SQLUser.Password }} diff --git a/config-connector/solutions/sql/helm/mysql-private/sql/templates/sqldatabase.yaml b/config-connector/solutions/sql/helm/mysql-private/sql/templates/sqldatabase.yaml deleted file mode 100644 index bfd0ea69650..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/sql/templates/sqldatabase.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: {{ .Values.Database.Name }} -spec: - charset: utf8mb4 - collation: utf8mb4_bin - instanceRef: - name: {{ .Values.MySQLInstance.Name }} diff --git a/config-connector/solutions/sql/helm/mysql-private/sql/templates/sqlinstance.yaml b/config-connector/solutions/sql/helm/mysql-private/sql/templates/sqlinstance.yaml deleted file mode 100644 index aef77503587..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/sql/templates/sqlinstance.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: {{ .Values.MySQLInstance.Name }} -spec: - databaseVersion: MYSQL_5_7 - region: {{ .Values.MySQLInstance.Region }} - settings: - tier: db-f1-micro - ipConfiguration: - ipv4Enabled: false - privateNetworkRef: - name: mysql-private-network diff --git a/config-connector/solutions/sql/helm/mysql-private/sql/templates/sqluser.yaml b/config-connector/solutions/sql/helm/mysql-private/sql/templates/sqluser.yaml deleted file mode 100644 index a61c5f00e88..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/sql/templates/sqluser.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: {{ required "User name is required field" .Values.SQLUser.Name }} -spec: - instanceRef: - name: {{ .Values.MySQLInstance.Name }} - host: "%" - password: - valueFrom: - secretKeyRef: - name: mysql-private-secret - key: password diff --git a/config-connector/solutions/sql/helm/mysql-private/sql/values.yaml b/config-connector/solutions/sql/helm/mysql-private/sql/values.yaml deleted file mode 100644 index 07221760f34..00000000000 --- a/config-connector/solutions/sql/helm/mysql-private/sql/values.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -SQLUser: - Name: - Password: - -MySQLInstance: - Name: mysql-private-instance - Region: us-central1 - -Database: - Name: mysql-private-database diff --git a/config-connector/solutions/sql/helm/postgres-ha/Chart.yaml b/config-connector/solutions/sql/helm/postgres-ha/Chart.yaml deleted file mode 100644 index c94097b0ada..00000000000 --- a/config-connector/solutions/sql/helm/postgres-ha/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -name: postgres-ha -version: 0.1.0 -description: Creating a Postgres high availability cluster diff --git a/config-connector/solutions/sql/helm/postgres-ha/README.md b/config-connector/solutions/sql/helm/postgres-ha/README.md deleted file mode 100644 index 6177693c25f..00000000000 --- a/config-connector/solutions/sql/helm/postgres-ha/README.md +++ /dev/null @@ -1,109 +0,0 @@ -# PostgreSQL High Availability - -================================================== - -## NAME - - postgres-ha - -## SYNOPSIS - - Config Connector compatible yaml files to configure a high availability PostgreSQL cluster. - -## CONSUMPTION - - 1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ```bash - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - - 1. Go to the service account folder: - - ```bash - cd cloud-foundation-toolkit/config-connector/solutions/sql/helm/postgres-ha - ``` - -## REQUIREMENTS - -1. GKE Cluster with [Config Connector installed using a GKE Workload Identity](https://cloud.google.com/config-connector/docs/how-to/install-upgrade-uninstall#workload-identity). -1. [Helm](../../../README.md#helm). -1. Cloud SQL Admin API enabled in the project where Config Connector is installed. -1. Cloud SQL Admin API enabled in the project managed by Config Connector if it is installed in a different project. -1. The "cnrm-system" service account with either `roles/cloudsql.admin` or `roles/owner` in the project managed by Config Connector. 
- -## USAGE - -All steps are run from the current directory ([config-connector/solutions/sql/helm/postgres-ha](.)). - -1. Review and update the values in `./values.yaml`. - -1. Configure a high availability PostgreSQL cluster with Helm. - - ```bash - # validate your chart - helm lint . --set User1.Name=first-username,User2.Name=second-username,User3.Name=third-username,\ - User1.Password=$(echo -n 'first-password' | base64),User2.Password=$(echo -n 'second-password' | base64),\ - User3.Password=$(echo -n 'third-password' | base64) - - # check the output of your chart - helm template . --set User1.Name=first-username,User2.Name=second-username,User3.Name=third-username,\ - User1.Password=$(echo -n 'first-password' | base64),User2.Password=$(echo -n 'second-password' | base64),\ - User3.Password=$(echo -n 'third-password' | base64) - - # do a dryrun on your chart - helm install . --dry-run --set User1.Name=first-username,User2.Name=second-username,User3.Name=third-username,\ - User1.Password=$(echo -n 'first-password' | base64),User2.Password=$(echo -n 'second-password' | base64),\ - User3.Password=$(echo -n 'third-password' | base64) --generate-name - - # install your chart - helm install . --set User1.Name=first-username,User2.Name=second-username,User3.Name=third-username,\ - User1.Password=$(echo -n 'first-password' | base64),User2.Password=$(echo -n 'second-password' | base64),\ - User3.Password=$(echo -n 'third-password' | base64) --generate-name - ``` - -1. _Optionally_ here the list of things you can customize. 
- - | NAME | DEFAULT VALUE | DESCRIPTION | - |-------------------|------------------------|--------------------------------| - | Database1.Name | postgres-ha-database-1 | name of first SQL database | - | Database2.Name | postgres-ha-database-2 | name of second SQL database | - | external-ip-range | 192.10.10.10/32 | ip range to allow to connect | - | PostgreSQLInstance.Name | postgres-ha-solution | name of main SQL instance | - | PostgreSQLInstance.Region | us-central1 | region of SQL instance | - | PostgreSQLInstance.Zone | us-central1-c | zone of main instance | - | PostgreSQLInstance.Replica1.Zone | us-central1-a | zone of first replica instance | - | PostgreSQLInstance.Replica2.Zone | us-central1-b | zone of second replica instance| - | PostgreSQLInstance.Replica3.Zone | us-central1-c | zone of third replica instance | - - **Note:** If your SQL Instance is deleted, the name you used will be reserved -for **7 days**. In order to re-apply this solution, you need to run the following command to update the value of PostgreSQLInstance.Name to "new-instance-name". - - ```bash - helm install . --set User1.Name=first-username,User2.Name=second-username,User3.Name=third-username,\ - User1.Password=$(echo -n 'first-password' | base64),User2.Password=$(echo -n 'second-password' | base64),\ - User3.Password=$(echo -n 'third-password' | base64), PostgreSQLInstance.Name=new-instance-name --generate-name - ``` - -1. Check the created helm release to verify the installation: - ```bash - helm list - ``` - Check the status of the sqlinstances,sqldatabase,sqlusers: - ```bash - kubectl describe sqldatabase,sqlinstance,sqluser,secret - ``` - -1. Clean up installation: - - ```bash - # list Helm releases to obtain release names - helm list - - # delete release specifying release name from the previous command output. - helm delete [release_name] - ``` - -## LICENSE - -Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
diff --git a/config-connector/solutions/sql/helm/postgres-ha/templates/secret.yaml b/config-connector/solutions/sql/helm/postgres-ha/templates/secret.yaml deleted file mode 100644 index e54a649c33e..00000000000 --- a/config-connector/solutions/sql/helm/postgres-ha/templates/secret.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Secret -metadata: - name: postgres-ha-secret -data: - password1: {{ required "Password of user1 is required!" .Values.User1.Password }} - password2: {{ required "Password of user2 is required!" .Values.User2.Password }} - password3: {{ required "Password of user3 is required!" .Values.User3.Password }} diff --git a/config-connector/solutions/sql/helm/postgres-ha/templates/sqldatabase.yaml b/config-connector/solutions/sql/helm/postgres-ha/templates/sqldatabase.yaml deleted file mode 100644 index d60ade62def..00000000000 --- a/config-connector/solutions/sql/helm/postgres-ha/templates/sqldatabase.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: {{ .Values.Database1.Name }} -spec: - charset: UTF8 - collation: en_US.UTF8 - instanceRef: - name: {{ .Values.PostgreSQLInstance.Name }} ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: {{ .Values.Database2.Name }} -spec: - charset: UTF8 - collation: en_US.UTF8 - instanceRef: - name: {{ .Values.PostgreSQLInstance.Name }} diff --git a/config-connector/solutions/sql/helm/postgres-ha/templates/sqlinstance.yaml b/config-connector/solutions/sql/helm/postgres-ha/templates/sqlinstance.yaml deleted file mode 100644 index 27a66825023..00000000000 --- a/config-connector/solutions/sql/helm/postgres-ha/templates/sqlinstance.yaml +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: {{ .Values.PostgreSQLInstance.Name }} -spec: - databaseVersion: POSTGRES_9_6 - region: {{ .Values.PostgreSQLInstance.Region }} - settings: - activationPolicy: ALWAYS - diskAutoresize: true - diskSize: 10 - diskType: PD_SSD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-custom-2-13312 - availabilityType: REGIONAL - backupConfiguration: - binaryLogEnabled: false - enabled: true - startTime: 20:55 - databaseFlags: - - name: autovacuum - value: "off" - ipConfiguration: - ipv4Enabled: true - requireSsl: true - authorizedNetworks: - - name: postgres-ha-solution-cidr - value: {{ .Values.ExternalIP.Range }} - locationPreference: - zone: {{ .Values.PostgreSQLInstance.Zone }} - maintenanceWindow: - day: 7 - hour: 12 - updateTrack: stable ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: {{ .Values.PostgreSQLInstance.Replica1.Name }} -spec: - databaseVersion: POSTGRES_9_6 - region: {{ .Values.PostgreSQLInstance.Region }} - masterInstanceRef: - name: {{ .Values.PostgreSQLInstance.Name }} - settings: - activationPolicy: ALWAYS - crashSafeReplication: true - diskAutoresize: true - diskSize: 10 - diskType: PD_HDD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-custom-2-13312 - availabilityType: ZONAL - databaseFlags: - - name: autovacuum - value: "off" - ipConfiguration: - ipv4Enabled: true - requireSsl: false - authorizedNetworks: - - name: postgres-ha-solution-cidr - value: {{ .Values.ExternalIP.Range }} - locationPreference: - zone: {{ .Values.PostgreSQLInstance.Replica1.Zone }} - maintenanceWindow: - day: 7 - hour: 12 - updateTrack: stable ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: {{ .Values.PostgreSQLInstance.Replica2.Name }} -spec: - databaseVersion: POSTGRES_9_6 - region: {{ .Values.PostgreSQLInstance.Region }} - masterInstanceRef: - name: {{ 
.Values.PostgreSQLInstance.Name }} - settings: - activationPolicy: ALWAYS - crashSafeReplication: true - diskAutoresize: true - diskSize: 10 - diskType: PD_HDD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-custom-2-13312 - availabilityType: ZONAL - databaseFlags: - - name: autovacuum - value: "off" - ipConfiguration: - ipv4Enabled: true - requireSsl: false - authorizedNetworks: - - name: postgres-ha-solution-cidr - value: {{ .Values.ExternalIP.Range }} - locationPreference: - zone: {{ .Values.PostgreSQLInstance.Replica2.Zone }} - maintenanceWindow: - day: 7 - hour: 12 - updateTrack: stable ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: {{ .Values.PostgreSQLInstance.Replica3.Name }} -spec: - databaseVersion: POSTGRES_9_6 - region: {{ .Values.PostgreSQLInstance.Region }} - masterInstanceRef: - name: {{ .Values.PostgreSQLInstance.Name }} - settings: - activationPolicy: ALWAYS - crashSafeReplication: true - diskAutoresize: true - diskSize: 10 - diskType: PD_HDD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-custom-2-13312 - availabilityType: ZONAL - databaseFlags: - - name: autovacuum - value: "off" - ipConfiguration: - ipv4Enabled: true - requireSsl: false - authorizedNetworks: - - name: postgres-ha-solution-cidr - value: {{ .Values.ExternalIP.Range }} - locationPreference: - zone: {{ .Values.PostgreSQLInstance.Replica3.Zone }} - maintenanceWindow: - day: 7 - hour: 12 - updateTrack: stable diff --git a/config-connector/solutions/sql/helm/postgres-ha/templates/sqluser.yaml b/config-connector/solutions/sql/helm/postgres-ha/templates/sqluser.yaml deleted file mode 100644 index 0a3ae6dd6f6..00000000000 --- a/config-connector/solutions/sql/helm/postgres-ha/templates/sqluser.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: {{ required "Username1 is required!" .Values.User1.Name }} -spec: - instanceRef: - name: {{ .Values.PostgreSQLInstance.Name }} - password: - valueFrom: - secretKeyRef: - name: postgres-ha-secret - key: password1 ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: {{ required "Username2 is required!" .Values.User2.Name }} -spec: - instanceRef: - name: {{ .Values.PostgreSQLInstance.Name }} - password: - valueFrom: - secretKeyRef: - name: postgres-ha-secret - key: password2 ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: {{ required "Username3 is required!" .Values.User3.Name }} -spec: - instanceRef: - name: {{ .Values.PostgreSQLInstance.Name }} - password: - valueFrom: - secretKeyRef: - name: postgres-ha-secret - key: password3 diff --git a/config-connector/solutions/sql/helm/postgres-ha/values.yaml b/config-connector/solutions/sql/helm/postgres-ha/values.yaml deleted file mode 100644 index d1b644bc516..00000000000 --- a/config-connector/solutions/sql/helm/postgres-ha/values.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -PostgreSQLInstance: - Name: postgres-ha-solution - Region: us-central - Zone: us-central-c - Replica1: - Name: replica-instance-1-name - Zone: us-central-a - Replica2: - Name: replica-instance-2-name - Zone: us-central-b - Replica3: - Name: replica-instance-3-name - Zone: us-central-c - -User1: - Name: - Password: - -User2: - Name: - Password: - -User3: - Name: - Password: - -Database1: - Name: postgres-ha-database-1 - -Database2: - Name: postgres-ha-database-2 - -ExternalIP: - Range: 199.10.10.10/32 diff --git a/config-connector/solutions/sql/kpt/mysql-ha/Kptfile b/config-connector/solutions/sql/kpt/mysql-ha/Kptfile deleted file mode 100644 index 6989914e10d..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-ha/Kptfile +++ /dev/null @@ -1,68 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: mysql-ha -packageMetadata: - shortDescription: creates a mysql high availability cluster -openAPI: - definitions: - io.k8s.cli.setters.test-pw: - description: password for SQL user "test" (base64 encoded) - x-k8s-cli: - setter: - name: test-pw - value: "${PASSWORD_1?}" - setBy: PLACEHOLDER - io.k8s.cli.setters.test2-pw: - description: password for SQL user "test2" (base64 encoded) - x-k8s-cli: - setter: - name: test2-pw - value: "${PASSWORD_2?}" - setBy: PLACEHOLDER - io.k8s.cli.setters.test3-pw: - description: password for SQL user "test3" (base64 encoded) - x-k8s-cli: - setter: - name: test3-pw - value: "${PASSWORD_3?}" - setBy: PLACEHOLDER - io.k8s.cli.setters.instance-name: - description: name of SQL instance - x-k8s-cli: - setter: - 
name: instance-name - value: mysql-ha-solution - setBy: package-default - io.k8s.cli.substitutions.failover-instance-name: - x-k8s-cli: - substitution: - name: failover-instance-name - pattern: INSTANCE_NAME_SETTER-failover-test - values: - - marker: INSTANCE_NAME_SETTER - ref: '#/definitions/io.k8s.cli.setters.instance-name' - io.k8s.cli.substitutions.replica-instance-0-name: - x-k8s-cli: - substitution: - name: replica-instance-0-name - pattern: INSTANCE_NAME_SETTER-replica-test0 - values: - - marker: INSTANCE_NAME_SETTER - ref: '#/definitions/io.k8s.cli.setters.instance-name' - io.k8s.cli.substitutions.replica-instance-1-name: - x-k8s-cli: - substitution: - name: replica-instance-1-name - pattern: INSTANCE_NAME_SETTER-replica-test1 - values: - - marker: INSTANCE_NAME_SETTER - ref: '#/definitions/io.k8s.cli.setters.instance-name' - io.k8s.cli.substitutions.replica-instance-2-name: - x-k8s-cli: - substitution: - name: replica-instance-2-name - pattern: INSTANCE_NAME_SETTER-replica-test2 - values: - - marker: INSTANCE_NAME_SETTER - ref: '#/definitions/io.k8s.cli.setters.instance-name' diff --git a/config-connector/solutions/sql/kpt/mysql-ha/README.md b/config-connector/solutions/sql/kpt/mysql-ha/README.md deleted file mode 100644 index 2aee31c961f..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-ha/README.md +++ /dev/null @@ -1,59 +0,0 @@ -MySQL High Availability -================================================== -# NAME - mysql-ha -# SYNOPSIS - Config Connector compatible YAMLs for creating a high availability MySQL cluster -# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/): - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/sql/kpt/mysql-ha mysql-ha - ``` -# REQUIREMENTS - - A working Config Connector instance using the "cnrm-system" service - account with either `roles/cloudsql.admin` or `roles/owner` in the project - managed by Config 
Connector - - Cloud SQL Admin API enabled in the project where Config Connector is - installed - - Cloud SQL Admin API enabled in the project managed by Config Connector if - it is a different project - -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|-------------------|---------------------|-----------------|-------------------------------|-------| -| instance-name | mysql-ha-solution | package-default | name of SQL instance | 14 | -| test-pw | ${PASSWORD_1?} | PLACEHOLDER | password for SQL user "test"
(base64 encoded) | 1 | -| test2-pw | ${PASSWORD_2?} | PLACEHOLDER | password for SQL user "test2"
(base64 encoded) | 1 | -| test3-pw | ${PASSWORD_3?} | PLACEHOLDER | password for SQL user "test3"
(base64 encoded) | 1 | - -# USAGE - Configure setters using kpt as follows: - ``` - kpt cfg set . NAME VALUE - ``` - Setting placeholder values is required, changing package-defaults is optional. - - Set `test-pw`, `test2-pw`, and `test3-pw` to the [base64 - encoded](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually) passwords for user `test`, - user `test2`, and user `test3`: - ``` - kpt cfg set . test-pw $(echo -n 'first-password' | base64) - kpt cfg set . test2-pw $(echo -n 'second-password' | base64) - kpt cfg set . test3-pw $(echo -n 'third-password' | base64) - ``` - _Optionally,_ set `instance-name` in the same manner. - - **Note:** If your SQL Instance is deleted, the name you used will be reserved - for **7 days**. In order to re-apply this solution, you need to run - `kpt cfg set . instance-name new-instance-name` to change to a new - instance name that hasn't been used in the last 7 days. - - Once the configuration is satisfactory, apply: - ``` - kubectl apply -f . - ``` - **Note:** It will take up to ~40 mins for all the resources to be `Ready`. - -# LICENSE - Apache 2.0 - See [LICENSE](/LICENSE) for more information. - diff --git a/config-connector/solutions/sql/kpt/mysql-ha/secret.yaml b/config-connector/solutions/sql/kpt/mysql-ha/secret.yaml deleted file mode 100644 index bdb124a64c2..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-ha/secret.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Secret -metadata: - name: mysql-ha-secret -data: - password1: "${PASSWORD_1?}" # {"$ref":"#/definitions/io.k8s.cli.setters.test-pw"} - password2: "${PASSWORD_2?}" # {"$ref":"#/definitions/io.k8s.cli.setters.test2-pw"} - password3: "${PASSWORD_3?}" # {"$ref":"#/definitions/io.k8s.cli.setters.test3-pw"} diff --git a/config-connector/solutions/sql/kpt/mysql-ha/sqldatabase.yaml b/config-connector/solutions/sql/kpt/mysql-ha/sqldatabase.yaml deleted file mode 100644 index 75b0b2bd163..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-ha/sqldatabase.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: mysql-ha-database -spec: - charset: utf8mb4 - collation: utf8mb4_general_ci - instanceRef: - name: mysql-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: mysql-ha-database-additional -spec: - charset: utf8mb4 - collation: utf8mb4_general_ci - instanceRef: - name: mysql-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} diff --git a/config-connector/solutions/sql/kpt/mysql-ha/sqlinstance.yaml b/config-connector/solutions/sql/kpt/mysql-ha/sqlinstance.yaml deleted file mode 100644 index 1e9afb11697..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-ha/sqlinstance.yaml +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: mysql-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - labels: - solution: mysql-ha - node: master -spec: - databaseVersion: MYSQL_5_7 - region: us-central1 - settings: - activationPolicy: ALWAYS - diskAutoresize: true - diskSize: 10 - diskType: PD_SSD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-n1-standard-1 - backupConfiguration: - binaryLogEnabled: true - enabled: true - startTime: 20:55 - databaseFlags: - - name: long_query_time - value: "1" - ipConfiguration: - ipv4Enabled: true - requireSsl: true - authorizedNetworks: - - name: mysql-ha-solution-cidr - value: 192.10.10.10/32 - locationPreference: - zone: us-central1-c - maintenanceWindow: - day: 7 - hour: 12 - updateTrack: stable ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: mysql-ha-solution-failover-test # {"$ref":"#/definitions/io.k8s.cli.substitutions.failover-instance-name"} - labels: - solution: mysql-ha - node: failover -spec: - databaseVersion: MYSQL_5_7 - region: us-central1 - masterInstanceRef: - name: mysql-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - replicaConfiguration: - connectRetryInterval: 5 - settings: - activationPolicy: ALWAYS - crashSafeReplication: true - diskAutoresize: true - diskSize: 10 - diskType: PD_SSD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-n1-standard-1 - databaseFlags: - - name: long_query_time - value: "1" - ipConfiguration: - ipv4Enabled: true - requireSsl: false - authorizedNetworks: - - name: mysql-ha-solution-cidr - value: 192.10.10.10/32 - locationPreference: - zone: us-central1-a - maintenanceWindow: - day: 3 - hour: 20 - updateTrack: canary ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: mysql-ha-solution-replica-test0 # {"$ref":"#/definitions/io.k8s.cli.substitutions.replica-instance-0-name"} - labels: 
- solution: mysql-ha - node: replica-test0 -spec: - databaseVersion: MYSQL_5_7 - region: us-central1 - masterInstanceRef: - name: mysql-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - replicaConfiguration: - connectRetryInterval: 5 - settings: - activationPolicy: ALWAYS - crashSafeReplication: true - diskAutoresize: true - diskSize: 10 - diskType: PD_HDD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-n1-standard-1 - databaseFlags: - - name: long_query_time - value: "1" - ipConfiguration: - ipv4Enabled: true - requireSsl: false - authorizedNetworks: - - name: mysql-ha-solution-cidr - value: 192.10.10.10/32 - locationPreference: - zone: us-central1-a - maintenanceWindow: - day: 1 - hour: 22 - updateTrack: stable ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: mysql-ha-solution-replica-test1 # {"$ref":"#/definitions/io.k8s.cli.substitutions.replica-instance-1-name"} - labels: - solution: mysql-ha - node: replica-test1 -spec: - databaseVersion: MYSQL_5_7 - region: us-central1 - masterInstanceRef: - name: mysql-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - replicaConfiguration: - connectRetryInterval: 5 - settings: - activationPolicy: ALWAYS - crashSafeReplication: true - diskAutoresize: true - diskSize: 10 - diskType: PD_HDD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-n1-standard-1 - databaseFlags: - - name: long_query_time - value: "1" - ipConfiguration: - ipv4Enabled: true - requireSsl: false - authorizedNetworks: - - name: mysql-ha-solution-cidr - value: 192.10.10.10/32 - locationPreference: - zone: us-central1-b - maintenanceWindow: - day: 1 - hour: 22 - updateTrack: stable ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: mysql-ha-solution-replica-test2 # {"$ref":"#/definitions/io.k8s.cli.substitutions.replica-instance-2-name"} - labels: - solution: mysql-ha - node: replica-test2 -spec: - 
databaseVersion: MYSQL_5_7 - region: us-central1 - masterInstanceRef: - name: mysql-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - replicaConfiguration: - connectRetryInterval: 5 - settings: - activationPolicy: ALWAYS - crashSafeReplication: true - diskAutoresize: true - diskSize: 10 - diskType: PD_HDD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-n1-standard-1 - databaseFlags: - - name: long_query_time - value: "1" - ipConfiguration: - ipv4Enabled: true - requireSsl: false - authorizedNetworks: - - name: mysql-ha-solution-cidr - value: 192.10.10.10/32 - locationPreference: - zone: us-central1-c - maintenanceWindow: - day: 1 - hour: 22 - updateTrack: stable diff --git a/config-connector/solutions/sql/kpt/mysql-ha/sqluser.yaml b/config-connector/solutions/sql/kpt/mysql-ha/sqluser.yaml deleted file mode 100644 index 4275ac41048..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-ha/sqluser.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: test -spec: - instanceRef: - name: mysql-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - host: "%" - password: - valueFrom: - secretKeyRef: - name: mysql-ha-secret - key: password1 ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: test2 -spec: - instanceRef: - name: mysql-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - host: localhost - password: - valueFrom: - secretKeyRef: - name: mysql-ha-secret - key: password2 ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: test3 -spec: - instanceRef: - name: mysql-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - host: localhost - password: - valueFrom: - secretKeyRef: - name: mysql-ha-secret - key: password3 diff --git a/config-connector/solutions/sql/kpt/mysql-private/Kptfile b/config-connector/solutions/sql/kpt/mysql-private/Kptfile deleted file mode 100644 index d751daa54ce..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-private/Kptfile +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: mysql-private -packageMetadata: - shortDescription: configure private mysql instance -openAPI: - definitions: - io.k8s.cli.setters.username: - description: name of SQL user - x-k8s-cli: - setter: - name: username - value: ${USERNAME?} - setBy: PLACEHOLDER - io.k8s.cli.setters.password: - description: SQL password (base64 encoded) - x-k8s-cli: - setter: - name: password - value: ${PASSWORD?} - setBy: PLACEHOLDER - io.k8s.cli.setters.region: - description: region of SQL instance - x-k8s-cli: - setter: - name: region - value: us-central1 - setBy: package-default - io.k8s.cli.setters.database-name: - description: name of SQL database - x-k8s-cli: - setter: - name: database-name - value: mysql-private-database - setBy: package-default - 
io.k8s.cli.setters.instance-name: - description: name of SQL instance - x-k8s-cli: - setter: - name: instance-name - value: mysql-private-instance - setBy: package-default diff --git a/config-connector/solutions/sql/kpt/mysql-private/README.md b/config-connector/solutions/sql/kpt/mysql-private/README.md deleted file mode 100644 index 677757f7087..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-private/README.md +++ /dev/null @@ -1,70 +0,0 @@ -MySQL Private -================================================== -# NAME - mysql-private -# SYNOPSIS - Config Connector compatible YAML files for creating a MySQL instance on a private network -# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/). - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/sql/kpt/mysql-private mysql-private - ``` - -# REQUIREMENTS - - A working Config Connector instance using the "cnrm-system" service - account with either both `roles/cloudsql.admin` and - `roles/compute.networkAdmin` roles or `roles/owner` in the project managed - by Config Connector - - The following APIs enabled in the project where Config Connector is - installed: - - Cloud SQL Admin API - - Compute Engine API - - The following APIs enabled in the project managed by Config Connector: - - Cloud SQL Admin API - - Compute Engine API - - Service Networking API - -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|---------------|------------------------|-----------------|-------------------------------|-------| -| database-name | mysql-private-database | package-default | name of SQL database | 1 | -| instance-name | mysql-private-instance | package-default | name of SQL instance | 3 | -| password | ${PASSWORD?} | PLACEHOLDER | SQL password (base64 encoded) | 1 | -| region | us-central1 | package-default | region of SQL instance | 1 | -| username | ${USERNAME?} | PLACEHOLDER | name of SQL user | 1 | - -# USAGE 
- - Configure setters using kpt as follows: - ``` - kpt cfg set . NAME VALUE - ``` - Setting placeholder values is required, changing package-defaults is optional. - - Set `username` to the SQL username that you will use to access the database. - ``` - kpt cfg set . username your-username - ``` - _Optionally_ set `database-name`, `instance-name`, and `region` in the same -manner. Note that if your instance is deleted the name you used will be -reserved for 7 days. You will need to use a new name in order to re-create the -instance. - - `password` should be set to a [base64 encoded](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually) value. - ``` - kpt cfg set . password $(echo -n 'your-password' | base64) - ``` - Due to the bug in Config Connector ([more details](https://github.com/GoogleCloudPlatform/k8s-config-connector/issues/148)), the following resources must be in a ready state before the SQLInstance YAML is applied: - - `ComputeNetwork` - - `ComputeAddress` - - `ServiceNetworkingConnection` - - To ensure this is the case, use the following: - ``` - kubectl apply -f network - kubectl wait --for=condition=Ready -f network - kubectl apply -f sql - ``` - -# LICENSE - Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/solutions/sql/kpt/mysql-private/network/computeaddress.yaml b/config-connector/solutions/sql/kpt/mysql-private/network/computeaddress.yaml deleted file mode 100644 index fa32cf3bc21..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-private/network/computeaddress.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeAddress -metadata: - name: mysql-private-address -spec: - addressType: INTERNAL - location: global - purpose: VPC_PEERING - prefixLength: 16 - networkRef: - name: mysql-private-network diff --git a/config-connector/solutions/sql/kpt/mysql-private/network/computenetwork.yaml b/config-connector/solutions/sql/kpt/mysql-private/network/computenetwork.yaml deleted file mode 100644 index 7e40709dbf5..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-private/network/computenetwork.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeNetwork -metadata: - name: mysql-private-network diff --git a/config-connector/solutions/sql/kpt/mysql-private/network/servicenetworkingconnection.yaml b/config-connector/solutions/sql/kpt/mysql-private/network/servicenetworkingconnection.yaml deleted file mode 100644 index 6c0d87f4a7f..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-private/network/servicenetworkingconnection.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 -kind: ServiceNetworkingConnection -metadata: - name: mysql-private-connection -spec: - networkRef: - name: mysql-private-network - reservedPeeringRanges: - - name: mysql-private-address - service: servicenetworking.googleapis.com diff --git a/config-connector/solutions/sql/kpt/mysql-private/sql/secret.yaml b/config-connector/solutions/sql/kpt/mysql-private/sql/secret.yaml deleted file mode 100644 index ede8e0f1279..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-private/sql/secret.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Secret -metadata: - name: mysql-private-secret -data: - password: ${PASSWORD?} # {"$ref":"#/definitions/io.k8s.cli.setters.password"} diff --git a/config-connector/solutions/sql/kpt/mysql-private/sql/sqldatabase.yaml b/config-connector/solutions/sql/kpt/mysql-private/sql/sqldatabase.yaml deleted file mode 100644 index 252133f3228..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-private/sql/sqldatabase.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: mysql-private-database # {"$ref":"#/definitions/io.k8s.cli.setters.database-name"} -spec: - charset: utf8mb4 - collation: utf8mb4_bin - instanceRef: - name: mysql-private-instance # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} diff --git a/config-connector/solutions/sql/kpt/mysql-private/sql/sqlinstance.yaml b/config-connector/solutions/sql/kpt/mysql-private/sql/sqlinstance.yaml deleted file mode 100644 index 7311d84b27d..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-private/sql/sqlinstance.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: mysql-private-instance # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} -spec: - databaseVersion: MYSQL_5_7 - region: us-central1 # {"$ref":"#/definitions/io.k8s.cli.setters.region"} - settings: - tier: db-f1-micro - ipConfiguration: - ipv4Enabled: false - privateNetworkRef: - name: mysql-private-network diff --git a/config-connector/solutions/sql/kpt/mysql-private/sql/sqluser.yaml b/config-connector/solutions/sql/kpt/mysql-private/sql/sqluser.yaml deleted file mode 100644 index 22414ae1531..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-private/sql/sqluser.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: ${USERNAME?} # {"$ref":"#/definitions/io.k8s.cli.setters.username"} -spec: - instanceRef: - name: mysql-private-instance # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - host: "%" - password: - valueFrom: - secretKeyRef: - name: mysql-private-secret - key: password diff --git a/config-connector/solutions/sql/kpt/mysql-public/Kptfile b/config-connector/solutions/sql/kpt/mysql-public/Kptfile deleted file mode 100644 index 861ab857767..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-public/Kptfile +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: mysql-public -packageMetadata: - shortDescription: configures a public MySQL database -openAPI: - definitions: - io.k8s.cli.setters.instance-name: - description: name of SQL instance - x-k8s-cli: - setter: - name: instance-name - value: mysql-public-solution - setBy: package-default - io.k8s.cli.setters.authorized-network: - description: name of authorized network - x-k8s-cli: - setter: - name: authorized-network - value: mysql-public-solution-sample - setBy: package-default - io.k8s.cli.setters.authorized-network-cidr: - description: authorized network CIDR range - x-k8s-cli: - setter: - name: authorized-network-cidr - value: 130.211.0.0/28 - setBy: package-default - io.k8s.cli.setters.password: - description: SQL password (base64) - x-k8s-cli: - setter: - name: password - value: ${PASSWORD?} - setBy: PLACEHOLDER diff --git a/config-connector/solutions/sql/kpt/mysql-public/README.md b/config-connector/solutions/sql/kpt/mysql-public/README.md deleted file mode 100644 index 9a156b15c63..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-public/README.md +++ /dev/null @@ -1,53 +0,0 @@ -MySQL Public -================================================== -# NAME - mysql-public -# SYNOPSIS - Config Connector compatible yaml files to configure a public MySQL database -# 
CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/). - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/sql/kpt/mysql-public mysql-public - ``` -# REQUIREMENTS - - A working Config Connector instance using the "cnrm-system" service - account with either `roles/cloudsql.admin` or `roles/owner` in the project - managed by Config Connector - - Cloud SQL Admin API enabled in the project where Config Connector is - installed - - Cloud SQL Admin API enabled in the project managed by Config Connector if - it is a different project - -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|-------------------------|------------------------------|-----------------|-------------------------------|-------| -| authorized-network | mysql-public-solution-sample | package-default | name of authorized network | 1 | -| authorized-network-cidr | 130.211.0.0/28 | package-default | authorized network CIDR range | 1 | -| instance-name | mysql-public-solution | package-default | name of SQL instance | 3 | -| password | ${PASSWORD?} | PLACEHOLDER | SQL password (base64) | 1 | -# USAGE - Configure setters using kpt as follows: - ``` - kpt cfg set . NAME VALUE - ``` - Setting placeholder values is required, changing package-defaults is optional. - - `password` should be set to a [base64 encoded](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually) value. - ``` - kpt cfg set . password $(echo -n 'your-password' | base64) - ``` - - _Optionally_ set `authorized-network`, `authorized-network-cidr`, and `instance-name` in the manner specified above. - - **Note:** If your SQL Instance is deleted, the name you used will be reserved -for **7 days**. In order to re-apply this solution, you need to run -`kpt cfg set . instance-name new-instance-name` to change to a new -instance name that hasn't been used in the last 7 days. 
- - Once the configuration is satisfactory, apply: - ``` - kubectl apply -f . - ``` -# LICENSE - Apache 2.0 - See [LICENSE](/LICENSE) for more information. - diff --git a/config-connector/solutions/sql/kpt/mysql-public/secret.yaml b/config-connector/solutions/sql/kpt/mysql-public/secret.yaml deleted file mode 100644 index 1040efc7ded..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-public/secret.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Secret -metadata: - name: mysql-public-secret -data: - password: ${PASSWORD?} # {"$ref":"#/definitions/io.k8s.cli.setters.password"} diff --git a/config-connector/solutions/sql/kpt/mysql-public/sqldatabase.yaml b/config-connector/solutions/sql/kpt/mysql-public/sqldatabase.yaml deleted file mode 100644 index 5e31be7d544..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-public/sqldatabase.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: mysql-public-database -spec: - charset: utf8 - collation: utf8_general_ci - instanceRef: - name: mysql-public-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} diff --git a/config-connector/solutions/sql/kpt/mysql-public/sqlinstance.yaml b/config-connector/solutions/sql/kpt/mysql-public/sqlinstance.yaml deleted file mode 100644 index 0d52c3c2b3c..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-public/sqlinstance.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: mysql-public-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} -spec: - databaseVersion: MYSQL_5_6 - region: us-central1 - settings: - activationPolicy: ALWAYS - diskAutoresize: true - diskSize: 10 - diskType: PD_SSD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-n1-standard-1 - backupConfiguration: - enabled: false - ipConfiguration: - ipv4Enabled: true - requireSsl: true - authorizedNetworks: - - name: mysql-public-solution-sample # {"$ref":"#/definitions/io.k8s.cli.setters.authorized-network"} - value: 130.211.0.0/28 # {"$ref":"#/definitions/io.k8s.cli.setters.authorized-network-cidr"} - locationPreference: - zone: us-central1-c - maintenanceWindow: - day: 1 - hour: 23 - updateTrack: canary diff --git a/config-connector/solutions/sql/kpt/mysql-public/sqluser.yaml b/config-connector/solutions/sql/kpt/mysql-public/sqluser.yaml deleted file mode 100644 index b1094140684..00000000000 --- a/config-connector/solutions/sql/kpt/mysql-public/sqluser.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: default -spec: - instanceRef: - name: mysql-public-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - password: - valueFrom: - secretKeyRef: - name: mysql-public-secret - key: password diff --git a/config-connector/solutions/sql/kpt/postgres-ha/Kptfile b/config-connector/solutions/sql/kpt/postgres-ha/Kptfile deleted file mode 100644 index 148b21848d6..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-ha/Kptfile +++ /dev/null @@ -1,137 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: postgres-ha -packageMetadata: - shortDescription: a Postgres high availability cluster -openAPI: - definitions: - io.k8s.cli.setters.instance-name: - description: name of main SQL instance - x-k8s-cli: - setter: - name: instance-name - value: postgres-ha-solution - setBy: package-default - io.k8s.cli.setters.region: - description: region of SQL instance - x-k8s-cli: - setter: - name: region - value: us-central1 - setBy: package-default - io.k8s.cli.setters.database-1-name: - description: name of first SQL database - x-k8s-cli: - setter: - name: database-1-name - value: postgres-ha-database-1 - setBy: package-default - io.k8s.cli.setters.database-2-name: - description: name of second SQL database - x-k8s-cli: - setter: - name: database-2-name - value: postgres-ha-database-2 - setBy: package-default - io.k8s.cli.substitutions.replica-instance-1-name: - x-k8s-cli: - substitution: - name: replica-instance-1-name - pattern: INSTANCE_NAME_SETTER-replica-1 - values: - - marker: INSTANCE_NAME_SETTER - ref: '#/definitions/io.k8s.cli.setters.instance-name' - io.k8s.cli.substitutions.replica-instance-2-name: - x-k8s-cli: - substitution: - name: replica-instance-2-name - pattern: INSTANCE_NAME_SETTER-replica-2 - values: - - marker: INSTANCE_NAME_SETTER - ref: '#/definitions/io.k8s.cli.setters.instance-name' - io.k8s.cli.substitutions.replica-instance-3-name: - 
x-k8s-cli: - substitution: - name: replica-instance-3-name - pattern: INSTANCE_NAME_SETTER-replica-3 - values: - - marker: INSTANCE_NAME_SETTER - ref: '#/definitions/io.k8s.cli.setters.instance-name' - io.k8s.cli.setters.zone-replica-3: - description: zone of third replica instance - x-k8s-cli: - setter: - name: zone-replica-3 - value: us-central1-c - setBy: package-default - io.k8s.cli.setters.zone: - description: zone of main instance - x-k8s-cli: - setter: - name: zone - value: us-central1-c - setBy: package-default - io.k8s.cli.setters.zone-replica-2: - description: zone of second replica instance - x-k8s-cli: - setter: - name: zone-replica-2 - value: us-central1-b - setBy: package-default - io.k8s.cli.setters.zone-replica-1: - description: zone of first replica instance - x-k8s-cli: - setter: - name: zone-replica-1 - value: us-central1-a - setBy: package-default - io.k8s.cli.setters.password-1: - description: password of user - x-k8s-cli: - setter: - name: password-1 - value: ${PASSWORD_1?} - setBy: PLACEHOLDER - io.k8s.cli.setters.password-2: - description: password of user - x-k8s-cli: - setter: - name: password-2 - value: ${PASSWORD_2?} - setBy: PLACEHOLDER - io.k8s.cli.setters.password-3: - description: password of user - x-k8s-cli: - setter: - name: password-3 - value: ${PASSWORD_3?} - setBy: PLACEHOLDER - io.k8s.cli.setters.username-1: - description: name of user - x-k8s-cli: - setter: - name: username-1 - value: ${USERNAME_1?} - setBy: PLACEHOLDER - io.k8s.cli.setters.username-2: - description: name of user - x-k8s-cli: - setter: - name: username-2 - value: ${USERNAME_2?} - setBy: PLACEHOLDER - io.k8s.cli.setters.username-3: - description: name of user - x-k8s-cli: - setter: - name: username-3 - value: ${USERNAME_3?} - setBy: PLACEHOLDER - io.k8s.cli.setters.external-ip-range: - description: ip range to allow to connect - x-k8s-cli: - setter: - name: external-ip-range - value: 192.10.10.10/32 - setBy: package-default diff --git 
a/config-connector/solutions/sql/kpt/postgres-ha/README.md b/config-connector/solutions/sql/kpt/postgres-ha/README.md deleted file mode 100644 index 2bdc3997cbf..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-ha/README.md +++ /dev/null @@ -1,74 +0,0 @@ -PostgreSQL High Availability -================================================== -# NAME - postgres-ha -# SYNOPSIS - Config Connector compatible yaml files to configure a high availability PostgreSQL cluster -# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/). - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/sql/kpt/postgres-ha postgres-ha - ``` -# REQUIREMENTS - - A working Config Connector instance using the "cnrm-system" service - account with either `roles/cloudsql.admin` or `roles/owner` in the project - managed by Config Connector - - Cloud SQL Admin API enabled in the project where Config Connector is - installed - - Cloud SQL Admin API enabled in the project managed by Config Connector if - it is a different project - -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|-------------------|------------------------|-----------------|--------------------------------|-------| -| database-1-name | postgres-ha-database-1 | package-default | name of first SQL database | 1 | -| database-2-name | postgres-ha-database-2 | package-default | name of second SQL database | 1 | -| external-ip-range | 192.10.10.10/32 | package-default | ip range to allow to connect | 4 | -| instance-name | postgres-ha-solution | package-default | name of main SQL instance | 9 | -| password-1 | ${PASSWORD_1?} | PLACEHOLDER | password of user | 1 | -| password-2 | ${PASSWORD_2?} | PLACEHOLDER | password of user | 1 | -| password-3 | ${PASSWORD_3?} | PLACEHOLDER | password of user | 1 | -| region | us-central1 | package-default | region of SQL instance | 4 | -| username-1 | ${USERNAME_1?} | PLACEHOLDER | name of 
user            | 1     | -| username-2        | ${USERNAME_2?}         | PLACEHOLDER     | name of user                   | 1     | -| username-3        | ${USERNAME_3?}         | PLACEHOLDER     | name of user                   | 1     | -| zone              | us-central1-c          | package-default | zone of main instance          | 1     | -| zone-replica-1    | us-central1-a          | package-default | zone of first replica instance | 1     | -| zone-replica-2    | us-central1-b          | package-default | zone of second replica instance| 1     | -| zone-replica-3    | us-central1-c          | package-default | zone of third replica instance | 1     | -# USAGE -    Configure setters using kpt as follows: -    ``` -    kpt cfg set . NAME VALUE -    ``` -    Setting placeholder values is required, changing package-defaults is optional. - -    Set `username-1`, `username-2`, and `username-3` to the SQL usernames that you will use to access the database. -    ``` -    kpt cfg set . username-1 first-username -    kpt cfg set . username-2 second-username -    kpt cfg set . username-3 third-username -    ``` -    `password-1`, `password-2`, and `password-3` should be set to [base64 -encoded](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually) -values. -    ``` -    kpt cfg set . password-1 $(echo -n 'first-password' | base64) -    kpt cfg set . password-2 $(echo -n 'second-password' | base64) -    kpt cfg set . password-3 $(echo -n 'third-password' | base64) -    ``` -    _Optionally_ set `database-1-name`, `database-2-name`, `instance-name`, `region`, `zone`, and -`zone-replica-1`, `zone-replica-2`, and `zone-replica-3` in the same manner. - -    **Note:** If your SQL Instance is deleted, the name you used will be reserved -for **7 days**. In order to re-apply this solution, you need to run -`kpt cfg set . instance-name new-instance-name` to change to a new -instance name that hasn't been used in the last 7 days. - -    Once the configuration is satisfactory, apply: -    ``` -    kubectl apply -f . -    ``` -# LICENSE -    Apache 2.0 - See [LICENSE](/LICENSE) for more information. 
- diff --git a/config-connector/solutions/sql/kpt/postgres-ha/secret.yaml b/config-connector/solutions/sql/kpt/postgres-ha/secret.yaml deleted file mode 100644 index 68e3797eeef..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-ha/secret.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Secret -metadata: - name: postgres-ha-secret -data: - password1: ${PASSWORD_1?} # {"$ref":"#/definitions/io.k8s.cli.setters.password-1"} - password2: ${PASSWORD_2?} # {"$ref":"#/definitions/io.k8s.cli.setters.password-2"} - password3: ${PASSWORD_3?} # {"$ref":"#/definitions/io.k8s.cli.setters.password-3"} diff --git a/config-connector/solutions/sql/kpt/postgres-ha/sqldatabase.yaml b/config-connector/solutions/sql/kpt/postgres-ha/sqldatabase.yaml deleted file mode 100644 index cef374a5749..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-ha/sqldatabase.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: postgres-ha-database-1 # {"$ref":"#/definitions/io.k8s.cli.setters.database-1-name"} -spec: - charset: UTF8 - collation: en_US.UTF8 - instanceRef: - name: postgres-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: postgres-ha-database-2 # {"$ref":"#/definitions/io.k8s.cli.setters.database-2-name"} -spec: - charset: UTF8 - collation: en_US.UTF8 - instanceRef: - name: postgres-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} diff --git a/config-connector/solutions/sql/kpt/postgres-ha/sqlinstance.yaml b/config-connector/solutions/sql/kpt/postgres-ha/sqlinstance.yaml deleted file mode 100644 index 9c8ff9c1cad..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-ha/sqlinstance.yaml +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: postgres-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} -spec: - databaseVersion: POSTGRES_9_6 - region: us-central1 # {"$ref":"#/definitions/io.k8s.cli.setters.region"} - settings: - activationPolicy: ALWAYS - diskAutoresize: true - diskSize: 10 - diskType: PD_SSD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-custom-2-13312 - availabilityType: REGIONAL - backupConfiguration: - binaryLogEnabled: false - enabled: true - startTime: 20:55 - databaseFlags: - - name: autovacuum - value: "off" - ipConfiguration: - ipv4Enabled: true - requireSsl: true - authorizedNetworks: - - name: postgres-ha-solution-cidr - value: 192.10.10.10/32 # {"$ref":"#/definitions/io.k8s.cli.setters.external-ip-range"} - locationPreference: - zone: us-central1-c # {"$ref":"#/definitions/io.k8s.cli.setters.zone"} - maintenanceWindow: - day: 7 - hour: 12 - updateTrack: stable ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: postgres-ha-solution-replica-1 # {"$ref":"#/definitions/io.k8s.cli.substitutions.replica-instance-1-name"} -spec: - databaseVersion: POSTGRES_9_6 - region: us-central1 # {"$ref":"#/definitions/io.k8s.cli.setters.region"} - masterInstanceRef: - name: postgres-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - settings: - activationPolicy: ALWAYS - crashSafeReplication: true - diskAutoresize: true - diskSize: 10 - diskType: PD_HDD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-custom-2-13312 - availabilityType: ZONAL - databaseFlags: - - name: autovacuum - value: "off" - ipConfiguration: - ipv4Enabled: true - requireSsl: false - authorizedNetworks: - - name: postgres-ha-solution-cidr - value: 192.10.10.10/32 # {"$ref":"#/definitions/io.k8s.cli.setters.external-ip-range"} - locationPreference: - zone: us-central1-a # 
{"$ref":"#/definitions/io.k8s.cli.setters.zone-replica-1"} - maintenanceWindow: - day: 7 - hour: 12 - updateTrack: stable ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: postgres-ha-solution-replica-2 # {"$ref":"#/definitions/io.k8s.cli.substitutions.replica-instance-2-name"} -spec: - databaseVersion: POSTGRES_9_6 - region: us-central1 # {"$ref":"#/definitions/io.k8s.cli.setters.region"} - masterInstanceRef: - name: postgres-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - settings: - activationPolicy: ALWAYS - crashSafeReplication: true - diskAutoresize: true - diskSize: 10 - diskType: PD_HDD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-custom-2-13312 - availabilityType: ZONAL - databaseFlags: - - name: autovacuum - value: "off" - ipConfiguration: - ipv4Enabled: true - requireSsl: false - authorizedNetworks: - - name: postgres-ha-solution-cidr - value: 192.10.10.10/32 # {"$ref":"#/definitions/io.k8s.cli.setters.external-ip-range"} - locationPreference: - zone: us-central1-b # {"$ref":"#/definitions/io.k8s.cli.setters.zone-replica-2"} - maintenanceWindow: - day: 7 - hour: 12 - updateTrack: stable ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: postgres-ha-solution-replica-3 # {"$ref":"#/definitions/io.k8s.cli.substitutions.replica-instance-3-name"} -spec: - databaseVersion: POSTGRES_9_6 - region: us-central1 # {"$ref":"#/definitions/io.k8s.cli.setters.region"} - masterInstanceRef: - name: postgres-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - settings: - activationPolicy: ALWAYS - crashSafeReplication: true - diskAutoresize: true - diskSize: 10 - diskType: PD_HDD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-custom-2-13312 - availabilityType: ZONAL - databaseFlags: - - name: autovacuum - value: "off" - ipConfiguration: - ipv4Enabled: true - requireSsl: false - authorizedNetworks: - - name: 
postgres-ha-solution-cidr - value: 192.10.10.10/32 # {"$ref":"#/definitions/io.k8s.cli.setters.external-ip-range"} - locationPreference: - zone: us-central1-c # {"$ref":"#/definitions/io.k8s.cli.setters.zone-replica-3"} - maintenanceWindow: - day: 7 - hour: 12 - updateTrack: stable diff --git a/config-connector/solutions/sql/kpt/postgres-ha/sqluser.yaml b/config-connector/solutions/sql/kpt/postgres-ha/sqluser.yaml deleted file mode 100644 index d5dcd105a32..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-ha/sqluser.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: ${USERNAME_1?} # {"$ref":"#/definitions/io.k8s.cli.setters.username-1"} -spec: - instanceRef: - name: postgres-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - password: - valueFrom: - secretKeyRef: - name: postgres-ha-secret - key: password1 ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: ${USERNAME_2?} # {"$ref":"#/definitions/io.k8s.cli.setters.username-2"} -spec: - instanceRef: - name: postgres-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - password: - valueFrom: - secretKeyRef: - name: postgres-ha-secret - key: password2 ---- -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: ${USERNAME_3?} # {"$ref":"#/definitions/io.k8s.cli.setters.username-3"} -spec: - instanceRef: - name: postgres-ha-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - password: - valueFrom: - secretKeyRef: - name: postgres-ha-secret - key: password3 diff --git a/config-connector/solutions/sql/kpt/postgres-public/Kptfile b/config-connector/solutions/sql/kpt/postgres-public/Kptfile deleted file mode 100644 index aa7f3c465e7..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-public/Kptfile +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: kpt.dev/v1alpha1 -kind: Kptfile -metadata: - name: postgres-public -packageMetadata: - shortDescription: configures a public PostgreSQL database -openAPI: - definitions: - io.k8s.cli.setters.instance-name: - description: name of SQL instance - x-k8s-cli: - setter: - name: instance-name - value: postgres-public-solution - setBy: package-default - io.k8s.cli.setters.password: - description: password for SQL user - x-k8s-cli: - setter: - name: password - value: ${PASSWORD?} - setBy: PLACEHOLDER - io.k8s.cli.setters.authorized-network: - description: name of authorized network - x-k8s-cli: - setter: - name: authorized-network - value: 
postgres-public-solution-sample - setBy: package-default - io.k8s.cli.setters.authorized-network-cidr: - description: authorized network CIDR range - x-k8s-cli: - setter: - name: authorized-network-cidr - value: 130.211.0.0/28 - setBy: package-default diff --git a/config-connector/solutions/sql/kpt/postgres-public/README.md b/config-connector/solutions/sql/kpt/postgres-public/README.md deleted file mode 100644 index 992ba525124..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-public/README.md +++ /dev/null @@ -1,54 +0,0 @@ -PostgreSQL Public -================================================== -# NAME - postgres-public -# SYNOPSIS - Config Connector compatible yaml files to configure a public PostgreSQL database -# CONSUMPTION - Download the package using [kpt](https://googlecontainertools.github.io/kpt/). - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git/config-connector/solutions/sql/kpt/postgres-public postgres-public - ``` -# REQUIREMENTS - - A working Config Connector instance using the "cnrm-system" service - account with either `roles/cloudsql.admin` or `roles/owner` in the project - managed by Config Connector - - Cloud SQL Admin API enabled in the project where Config Connector is - installed - - Cloud SQL Admin API enabled in the project managed by Config Connector if - it is a different project - -# SETTERS -| NAME | VALUE | SET BY | DESCRIPTION | COUNT | -|-------------------------|---------------------------------|-----------------|-------------------------------|-------| -| authorized-network | postgres-public-solution-sample | package-default | name of authorized network | 1 | -| authorized-network-cidr | 130.211.0.0/28 | package-default | authorized network CIDR range | 1 | -| instance-name | postgres-ha-solution | package-default | name of SQL instance | 3 | -| password | ${PASSWORD?} | PLACEHOLDER | password for SQL user | 1 | -# USAGE - Configure setters using kpt as follows: - ``` - kpt cfg set 
. NAME VALUE - ``` - Setting placeholder values is required, changing package-defaults is optional. - - `password` should be set to a [base64 -encoded](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually) -value. - ``` - kpt cfg set . password $(echo -n 'password' | base64) - ``` - _Optionally_ set `authorized-network`, `authorized-network-cidr`, and `instance-name` in the same manner. - - **Note:** If your SQL Instance is deleted, the name you used will be reserved -for **7 days**. In order to re-apply this solution, you need to run -`kpt cfg set . instance-name new-instance-name` to change to a new -instance name that hasn't been used in the last 7 days. - - Once the configuration is satisfactory, apply: - ``` - kubectl apply -f . - ``` -# LICENSE - Apache 2.0 - See [LICENSE](/LICENSE) for more information. - diff --git a/config-connector/solutions/sql/kpt/postgres-public/secret.yaml b/config-connector/solutions/sql/kpt/postgres-public/secret.yaml deleted file mode 100644 index 0f5ffe2f828..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-public/secret.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: v1 -kind: Secret -metadata: - name: postgres-public-secret -data: - password: ${PASSWORD?} # {"$ref":"#/definitions/io.k8s.cli.setters.password"} diff --git a/config-connector/solutions/sql/kpt/postgres-public/sqldatabase.yaml b/config-connector/solutions/sql/kpt/postgres-public/sqldatabase.yaml deleted file mode 100644 index ff75806ad27..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-public/sqldatabase.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLDatabase -metadata: - name: postgres-public-database -spec: - charset: UTF8 - collation: en_US.UTF8 - instanceRef: - name: postgres-public-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} diff --git a/config-connector/solutions/sql/kpt/postgres-public/sqlinstance.yaml b/config-connector/solutions/sql/kpt/postgres-public/sqlinstance.yaml deleted file mode 100644 index f357a6729d8..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-public/sqlinstance.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLInstance -metadata: - name: postgres-public-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} -spec: - databaseVersion: POSTGRES_9_6 - region: us-central1 - settings: - activationPolicy: ALWAYS - diskAutoresize: true - diskSize: 10 - diskType: PD_SSD - pricingPlan: PER_USE - replicationType: SYNCHRONOUS - tier: db-f1-micro - availabilityType: ZONAL - backupConfiguration: - enabled: false - ipConfiguration: - ipv4Enabled: true - requireSsl: true - authorizedNetworks: - - name: postgres-public-solution-sample # {"$ref":"#/definitions/io.k8s.cli.setters.authorized-network"} - value: 130.211.0.0/28 # {"$ref":"#/definitions/io.k8s.cli.setters.authorized-network-cidr"} - locationPreference: - zone: us-central1-c - maintenanceWindow: - day: 1 - hour: 23 - updateTrack: canary diff --git a/config-connector/solutions/sql/kpt/postgres-public/sqluser.yaml b/config-connector/solutions/sql/kpt/postgres-public/sqluser.yaml deleted file mode 100644 index bd3e1c69315..00000000000 --- a/config-connector/solutions/sql/kpt/postgres-public/sqluser.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: sql.cnrm.cloud.google.com/v1beta1 -kind: SQLUser -metadata: - name: default -spec: - instanceRef: - name: postgres-public-solution # {"$ref":"#/definitions/io.k8s.cli.setters.instance-name"} - password: - valueFrom: - secretKeyRef: - name: postgres-public-secret - key: password diff --git a/config-connector/tests/README.md b/config-connector/tests/README.md deleted file mode 100644 index b77a7061ed2..00000000000 --- a/config-connector/tests/README.md +++ /dev/null @@ -1,260 +0,0 @@ -# Config Connector Solutions Testing CLI - -## Introduction - -This folder contains the Go CLI and testcases for testing the Config Connector -Solutions defined in [../solutions](../solutions) folder. - -* **[ccs-test/](./ccs-test/)** - Go code for the solutions test CLI -* **[testcases/](./testcases/)** - Testcases for each solution. If has - the same folder structure as the solutions, i.e. if the solution is under - ../solutions/iam/kpt/member-iam/, then the corresponding - testcases should be under ./testcases/iam/kpt/member-iam/ - - -**Note:** We only support testing kpt solutions. 
- -## Requirements - -* [gsutil](https://cloud.google.com/storage/docs/gsutil_install) -* [kpt](../solutions/README.md#kpt) -* a working Kubernetes cluster with Config Connector [installed and - configured]( - https://cloud.google.com/config-connector/docs/how-to/install-upgrade-uninstall) - * [Default namespace]( - https://cloud.google.com/config-connector/docs/how-to/install-upgrade-uninstall#setting_your_default_namespace) - should be [configured to the **project** where you want to manage the GCP - resources]( - https://cloud.google.com/config-connector/docs/how-to/install-upgrade-uninstall#specify_where_to_create_your_resources). - -## Consumption - -1. Clone GoogleCloudPlatform/cloud-foundation-toolkit repository: - - ``` - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git - ``` - -1. Go to the tests folder: - - ``` - cd cloud-foundation-toolkit/config-connector/tests - ``` - -1. Download the `test-cli` executable file: - - ``` - gsutil cp gs://kcc-solutions-test/test-cli test-cli - ``` -1. Change the file ACL to make it executable: - - ``` - chmod +x test-cli - ``` - -1. Set the environment variables for the tests. - - 1. Make a copy of the environments template file - ([./testcases/environments.template](./testcases/environments.template)) : - - ``` - cp ./testcases/environments.template ./testcases/environments.yaml - ``` - - 1. Edit the environments file - ([./testcases/environments.yaml](./testcases/environments.yaml)) to - update the environment variables. Use **any command or editing tool** - you prefer. E.g. you can use `sed` command: - - ``` - # YOUR_PROJECT_ID should be the project ID that your default namespace - # is annotated with. - sed -i 's/${DEFAULT_PROJECT_ID?}/[YOUR_PROJECT_ID]/g' ./testcases/environments.yaml - ``` - - **Note:** Please remember to set **ALL** the environment variables. - -1. 
Follow the README of each solution to configure permissions for the - "cnrm-system" service account, and enable necessary APIs. - -## How to run the tests? - -### Running a single test - -In order to run the test for a specific solution, you need to set the relative -path of the solution using `--path` or `-p` flag. - -**Note:** We only support testing **kpt** solutions specified under [testcases -folder](./testcases). - -Under the [tests](.) folder, run a test by providing the relative path: -``` -./test-cli run --path [RELATIVE_PATH] # E.g. "iam/kpt/member-iam" -``` - -Most test should take a few minutes to finish. But you'll need to specify the -timeout using the optional `--timeout` or `-t` flag for special test cases: - -**Note**: Running a special test case can take up to an hour. - -* [projects/kpt/shared-vpc](../solutions/projects/kpt/shared-vpc): 10m - ``` - ./test-cli run --path projects/kpt/shared-vpc --timeout 10m - ``` -* [sql/kpt/mysql-ha](../solutions/sql/kpt/mysql-ha): 20m - ``` - ./test-cli run --path sql/kpt/mysql-ha --timeout 20m - ``` -* [sql/kpt/mysql-public](../solutions/sql/kpt/mysql-public): 10m - ``` - ./test-cli run --path sql/kpt/mysql-public --timeout 10m - ``` -* [sql/kpt/postgres-ha](../solutions/sql/kpt/postgres-ha): 20m - ``` - ./test-cli run --path sql/kpt/postgres-ha --timeout 20m - ``` -* [sql/kpt/postgres-public](../solutions/sql/kpt/postgres-public): 10m - ``` - ./test-cli run --path sql/kpt/postgres-public --timeout 10m - ``` - -After you run the command, detailed output will be printed out. If you find the -last line of the output is `======Successfully finished the test for solution -RELATIVE_PATH]======`, it means the test run is successful. Otherwise, you'll -find the detailed error message for the failure. - -### Running all the tests - -You can also run all the tests at once using `--all` or `-a` flag. 
You'll need -to also set the timeout to be 20m or more in order to provide sufficient timeout -for the edge cases mentioned above. - -Under the [tests](.) folder, run all the tests: - -**Note:** It will take up to a few hours to finish running this command. - -``` -./test-cli run --all --timeout 20m -``` - -After you run the command, detailed output will be printed out. If you find the -last line of the output is `======Successfully finished all the tests======`, it -means all the tests have been completed successfully. Otherwise, you'll find the -detailed error message for the failure. - -### Exceptions - -Solutions that require manual steps can't be tested using our `test-cli`. Here -is the list of exceptions: - -* [projects/kpt/project-hierarchy]( - ../solutions/projects/kpt/project-hierarchy) - need to manually figure out - the folder ID before creating projects ([GitHub issue]( - https://github.com/GoogleCloudPlatform/k8s-config-connector/issues/104)) -* [sql/kpt/mysql-private](../solutions/sql/kpt/mysql-private) - need to create - resources following the specific order ([GitHub issue]( - https://github.com/GoogleCloudPlatform/k8s-config-connector/issues/148)) - -## How to add new tests? - -**Note:** We only support adding tests for kpt solutions. - -If you want to create tests for solution -`[SOLUTION_AREA]/kpt/[SOLUTION_NAME]` (e.g. `iam/kpt/member-iam`): - -1. Under your local copy of your - [forked](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) - cloud-foundation-toolkit repository, go to the testcases folder: - - ``` - cd cloud-foundation-toolkit/config-connector/tests/testcases - ``` - -1. Create the folder if it doesn't exist: - - ``` - mkdir -p [SOLUTION_AREA]/kpt/[SOLUTION_NAME] - ``` - -1. 
Create the testcase YAML file `required_fields_only.yaml` from - `test_values.template` file: - - ``` - cp test_values.template [SOLUTION_AREA]/kpt/[SOLUTION_NAME]/required_fields_only.yaml - ``` - - **Note:** We only support one testcase, which only set required kpt setters - (setters set by PLACEHOLDER). The only exception is test cases for SQL - solutions. If the test SQL Instance is deleted, the name will be reserved - for **7 days**. In order to redo the test, the `instance-name` setter is - required in SQL test cases, and the name of the test data file is changed to - `required_fields_with_sql_instance_name.yaml`. - -1. Check if the solution requires any PLACEHOLDERs to be set: - - ``` - kpt cfg list-setters ../../solutions/[SOLUTION_AREA]/kpt/[SOLUTION_NAME] - ``` - -1. For each setter that is a placeholder, decide if the value should be a **new - globally unique** value. E.g., the value of a new project ID. - - 1. If the value **MUST** be globally unique, append the following key-value - pair in the testcase YAML file: - - ``` - # \$ENV_VAR is the placeholder to reference to ENV_VAR you've set in the - # environments file (./environments.yaml). - # In order to create globally unique resource names, you need to append - # `-\$RANDOM_ID` after the `\$ENV_VAR`. E.g. `\$PROJECT_ID-\$RANDOM_ID`. - echo "[SETTER_NAME]: \$ENV_VAR-\$RANDOM_ID" >> \ - [SOLUTION_AREA]/kpt/[SOLUTION_NAME]/required_fields_only.yaml - ``` - - 1. If the value doesn't need to be globally unique, append the following - key-value pair in the testcase YAML file: - ``` - # \$ENV_VAR is the placeholder to reference to ENV_VAR you've set in the - # environments file (./environments.yaml). E.g. `\$PROJECT_ID`. - echo "[SETTER_NAME]: \$ENV_VAR" >> \ - [SOLUTION_AREA]/kpt/[SOLUTION_NAME]/required_fields_only.yaml - ``` - - **Note:** Please don't use $ENV_VAR directly in the command. The back slash - ("\\") is necessary because here, it is a string, but not a variable. 
We - don't want to set the value of ENV_VAR in the testcase YAML file. - - **Note:** `$RANDOM_ID` is a placeholder for the autogen randomized suffix, - and `RANDOM_ID` shouldn't be the name of the env var. - -1. Check the environments template file - ([./environments.template](./environments.template)). For each environment - variable you need but does **NOT** exist in the environments template file, - add it: - - ``` - echo "ENV_VAR: \${ENV_VAR?}" >> ./environments.template - ``` - -1. Create the YAML file for the original values of the setters from - `test_values.template` file: - - ``` - cp test_values.template [SOLUTION_AREA]/kpt/[SOLUTION_NAME]/original_values.yaml - ``` - -1. For each placeholder setter and its original value (you can find them by - running - `kpt cfg list-setters ../../solutions/[SOLUTION_AREA]/kpt/[SOLUTION_NAME]`), - append the key-pairs to the original values YAML file: - - ``` - # You need to add the back slash ("\") in front of the original value - # because it is placeholder starting with "$". - echo "[SETTER_NAME]: \[ORIGINAL_VALUE]" >> \ - [SOLUTION_AREA]/kpt/[SOLUTION_NAME]/original_values.yaml - ``` - -# License - - Apache 2.0 - See [LICENSE](/LICENSE) for more information. diff --git a/config-connector/tests/ccs-test/cmd/root.go b/config-connector/tests/ccs-test/cmd/root.go deleted file mode 100644 index e105c9c3f32..00000000000 --- a/config-connector/tests/ccs-test/cmd/root.go +++ /dev/null @@ -1,467 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package cmd - -import ( - "errors" - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" - "time" - - "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/config-connector/tests/ccs-test/util" - "github.com/ghodss/yaml" - "github.com/spf13/cobra" -) - -const ( - kubectlBinaryName = "kubectl" - testsDirPath = "config-connector/tests" - originalValuesFileName = "original_values.yaml" - requiredFieldsOnlyFileName = "required_fields_only.yaml" - requiredFieldsWithSQLInstanceNameFileName = "required_fields_with_sql_instance_name.yaml" - envFileRelativePath = "testcases/environments.yaml" - yamlFileSuffix = ".yaml" -) - -var ( - relativePath string - timeout string - testAll bool - - // Regex of the env vars that require a randomized suffix. - // It should be in the format of $ENV_VAR-$RANDOM_ID. - re = regexp.MustCompile(`\$(?P[A-Z]+|[A-Z]+[A-Z_]*[A-Z]+)(-\$RANDOM_ID)`) - - testFileNames = []string{requiredFieldsOnlyFileName, requiredFieldsWithSQLInstanceNameFileName} - - rootCmd = &cobra.Command{ - Use: "ccs-test", - Short: "CLI to test Config Connector Solutions", - Long: `CLI to test Config Connector Solutions`, - } - - runCmd = &cobra.Command{ - Use: "run", - Short: "Run a given test by the relative path --path", - Long: "Run a given test by the relative path --path", - Run: func(cmd *cobra.Command, args []string) { - // Check the required flag. 
- if relativePath == "" && !testAll { - log.Fatal("either \"--path\" or \"--all\" must be specified to run test") - } - - if relativePath != "" && testAll { - log.Fatal("\"--path\" and \"--all\" are mutually exclusive flags") - } - - relativePaths := []string{} - if testAll { - minTimeout, _ := time.ParseDuration("20m") - currentTimeout, err := time.ParseDuration(timeout) - if err != nil { - log.Fatalf("error setting the timeout to %q: %v", timeout, err) - } - - if currentTimeout < minTimeout { - log.Fatalf("\"--timeout\" must be set to 20m or more if --all is set") - } - - if relativePaths, err = getAllRelativePaths(); err != nil { - log.Fatalf("error retrieving the relative paths for all the test cases: %v", err) - } - - // Go through all the folders recursively under "./testcases" - log.Println("======Running all the tests...======\n") - - } else { - relativePaths = append(relativePaths, relativePath) - } - - for _, relativePath := range relativePaths { - log.Printf("======Testing solution %q...======\n", relativePath) - - // Calculate the path to testcase directory and solution directory. - current, err := os.Getwd() - if err != nil { - log.Fatalf("error retrieving the current directory: %v", err) - } - if !strings.HasSuffix(current, testsDirPath) { - log.Fatalf("error running tests under directory: %s. Please "+ - "follow the instructions in the README.", current) - } - testCasePath := filepath.Join(current, "testcases", relativePath) - envFilePath := filepath.Join(current, envFileRelativePath) - - parent := filepath.Dir(current) - solutionPath := filepath.Join(parent, "solutions", relativePath) - - // Clean up the left over resources if there are any. - if err := deleteResources(solutionPath); err != nil { - log.Fatalf("error cleaning up resources before running the "+ - "test. Please clean them up manually: %v", err) - } - - // Fetch the testcase values and run the test. 
- envValues := make(map[string]string) - if err := parseYamlToStringMap(envFilePath, envValues); err != nil { - log.Fatalf("error retrieving envrionment variables: %v", err) - } - - originalValues := make(map[string]string) - if err := parseYamlToStringMap(filepath.Join(testCasePath, originalValuesFileName), originalValues); err != nil { - log.Fatalf("error retrieving orginal values: %v", err) - } - - testValues := make(map[string]string) - for _, testFileName := range testFileNames { - if filePath, exists := hasTestFile(testCasePath, testFileName); exists { - if err := parseYamlToStringMap(filePath, testValues); err != nil { - log.Fatalf("error retrieving test values: %v", err) - } - break - } - } - - // Generate the random IDs first. - randomId, err := util.GenerateRandomizedSuffix() - if err != nil { - log.Fatalf("error generating the randomized suffix for resource names: %v", err) - } - - // Then populate the values of the env var and the random id. - realValues, err := finalizeValues(randomId, envValues, testValues) - if err != nil { - log.Fatalf("error finalizing test values: %v", err) - } - - if err := runKptTestcase(solutionPath, timeout, realValues, originalValues); err != nil { - log.Fatalf("test failed for solution %q: %v", relativePath, err) - } - - log.Printf("======Successfully finished the test for solution %q======\n", relativePath) - - } - - if testAll { - log.Println("======Successfully finished all the tests======\n") - } - }, - } -) - -func init() { - runCmd.PersistentFlags().StringVarP(&relativePath, "path", "p", "", "[Required][Mutually Exclusive with --all] The relative path to the folder of the solution's test cases, e.g. `iam/kpt/member-iam`.") - runCmd.PersistentFlags().StringVarP(&timeout, "timeout", "t", "60s", "[Optional] The timeout used to wait for resources to be READY. Default: `60s`.") - runCmd.PersistentFlags().BoolVarP(&testAll, "all", "a", false, "[Required][Mutually Exclusive with --path] Running all the solution test cases. 
Meanwhile, --timeout MUST be set to `20m` or more. Default: `false`.") - rootCmd.AddCommand(runCmd) -} - -// Execute adds all child commands to the root command and sets flags appropriately. -// This is called by main.main(). It only needs to happen once to the rootCmd. -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} - -func getAllRelativePaths() ([]string, error) { - var queue []string - var relativePaths []string - - current, err := os.Getwd() - if err != nil { - return nil, fmt.Errorf("error retrieving the current directory: %v", err) - } - if !strings.HasSuffix(current, testsDirPath) { - return nil, fmt.Errorf("error running tests under directory: %s. "+ - "Please follow the instructions in the README.", current) - } - - queue = append(queue, filepath.Join(current, "testcases")) - for len(queue) > 0 { - curDir := queue[0] - if len(queue) == 1 { - queue = []string{} - } else { - queue = queue[1:] - } - - fileInfos, err := ioutil.ReadDir(curDir) - if err != nil { - return nil, fmt.Errorf("error reading directory %q: %v", curDir, err) - } - // Loop through all the files under current directory. - for _, fileInfo := range fileInfos { - if fileInfo.IsDir() { - queue = append(queue, filepath.Join(curDir, fileInfo.Name())) - continue - } - // If any file under this directory is a YAML file, it means that - // we've reached the test values files and we should record the - // relative path. 
- if strings.HasSuffix(fileInfo.Name(), ".yaml") && fileInfo.Name() != "environments.yaml" { - relativePaths = append(relativePaths, strings.Split(curDir, "testcases/")[1]) - break - } - } - } - - return relativePaths, nil -} - -func parseYamlToStringMap(filePath string, result map[string]string) error { - bytes, err := ioutil.ReadFile(filePath) - if err != nil { - return fmt.Errorf("error reading file '%s': %v", filePath, err) - } - err = yaml.Unmarshal(bytes, &result) - if err != nil { - return fmt.Errorf("error unmarshaling file '%s': %v", filePath, err) - } - return nil -} - -func hasTestFile(testCasePath, fileName string) (string, bool) { - filePath := filepath.Join(testCasePath, fileName) - if _, err := os.Stat(filePath); err != nil { - return filePath, false - } - return filePath, true -} - -func finalizeValues(randomId string, envValues map[string]string, testValues map[string]string) (map[string]string, error) { - realValues := make(map[string]string) - for key, value := range testValues { - if !strings.HasPrefix(value, "$") { - return nil, fmt.Errorf("test value for setter %q is %q, expect a reference, e.g. 
$ENV_VAR", key, value) - } - realValue := "" - ok := false - if re.MatchString(value) { - submatch := re.FindStringSubmatch(value) - if len(submatch) == 0 { - return nil, fmt.Errorf("env var name is invalid in test value %q", value) - } - subexpNames := re.SubexpNames() - for i, name := range subexpNames { - if name == "EnvName" { - prefix, ok := envValues[submatch[i]] - if !ok { - return nil, fmt.Errorf("couldn't find the env var %q", submatch[i]) - } - realValue = fmt.Sprintf("%s-%s", prefix, randomId) - break - } - } - } else { - realValue, ok = envValues[strings.TrimPrefix(value, "$")] - if !ok { - return nil, fmt.Errorf("couldn't find the env var %q", strings.TrimPrefix(value, "$")) - } - } - realValues[key] = realValue - } - - return realValues, nil -} - -func runKptTestcase(solutionPath string, timeout string, testValues map[string]string, originalValues map[string]string) error { - // Set the kpt setters defined in the testcase. - log.Println("======Setting the kpt setters...======") - for key, value := range testValues { - output, err := exec.Command("kpt", "cfg", "set", solutionPath, key, - value, "--set-by", "test").CombinedOutput() - if err != nil { - log.Printf("stderr:\n%v\nstdout:\n%s\n", err, string(output)) - errToReturn := fmt.Errorf("error setting setter '%s' with value "+ - "'%s': %v\nstdout: %s", key, value, err, string(output)) - - // Clean up before exit with errors. - if err := resetKptSetters(solutionPath, originalValues); err != nil { - return concatErrors( - "error resetting kpt setters before exiting", - err, errToReturn) - } - - return errToReturn - } - log.Printf("%s\n", string(output)) - } - log.Println("======Successfully set the kpt setters======") - - // Apply all the resources. 
- log.Println("======Creating the resources...======") - output, err := exec.Command("kubectl", "create", "-f", solutionPath).CombinedOutput() - if err != nil { - log.Printf("stderr:\n%v\nstdout:\n%s\n", err, string(output)) - errToReturn := fmt.Errorf("error creating resources: %v\nstdout: %s", err, string(output)) - - // Clean up before exit with errors. - if err := cleanUp(solutionPath, originalValues); err != nil { - return concatErrors( - "error cleanning up resources before exiting", - err, errToReturn) - } - return errToReturn - } - log.Printf("%s\n", string(output)) - log.Println("======Successfully created the resources======") - - // Wait for all the resources to be ready. - if err := verifyReadyCondition(solutionPath, timeout); err != nil { - errToReturn := fmt.Errorf("error verifying the ready condition: %v", err) - - // Clean up before exit with errors. - if err := cleanUp(solutionPath, originalValues); err != nil { - return concatErrors( - "error cleanning up resources before exiting", - err, errToReturn) - } - return errToReturn - } - - // Clean up. - return cleanUp(solutionPath, originalValues) -} - -func cleanUp(solutionPath string, originalValues map[string]string) error { - resourceErr := deleteResources(solutionPath) - setterErr := resetKptSetters(solutionPath, originalValues) - if resourceErr != nil || setterErr != nil { - return concatErrors( - fmt.Sprintf("error cleanning up the test for solution %q. 
"+ - "Please manually delete the Config Connector resources and "+ - "reset the kpt setters", solutionPath), - resourceErr, setterErr) - } - return nil -} - -func verifyReadyCondition(solutionPath string, timeout string) error { - log.Println("======Verifying that all the Config Connector resources are ready...======") - - files, err := ioutil.ReadDir(solutionPath) - if err != nil { - return fmt.Errorf("error reading solution directory %q: %v", solutionPath, err) - } - - for _, file := range files { - // We should only verify the YAML config files for Config Connector - // resources. - fileName := file.Name() - if !isResourceYamlFile(fileName) { - continue - } - - resourceFilePath := filepath.Join(solutionPath, fileName) - output, err := exec.Command("kubectl", "wait", "--for=condition=ready", - "-f", resourceFilePath, fmt.Sprintf("--timeout=%s", timeout)).CombinedOutput() - if err != nil { - log.Printf("stderr:\n%v\nstdout:\n%s\n", err, string(output)) - errToReturn := fmt.Errorf("resource in file %q is not ready in %s: %v\nstdout: %s", fileName, timeout, err, string(output)) - status, err := getSolutionResourceStatus(solutionPath) - if err != nil { - return concatErrors("error printing resource status", err, errToReturn) - } - - return fmt.Errorf("%v\nResource status:\n%s", errToReturn, status) - } - log.Printf("%s\n", string(output)) - - } - - log.Println("======All the Config Connector resrouces are ready======") - return nil -} - -func isResourceYamlFile(fileName string) bool { - return strings.HasSuffix(fileName, yamlFileSuffix) && - !strings.Contains(fileName, "namespace") && - !strings.Contains(fileName, "secret") -} - -func getSolutionResourceStatus(solutionPath string) (string, error) { - output, err := exec.Command("kubectl", "get", "-f", solutionPath, - "-o=custom-columns=NAME:.metadata.name,KIND:.kind,CONDITION.REASON:.status.conditions[0].reason,CONDITION.MESSAGE:.status.conditions[0].message"). 
- CombinedOutput() - - if err != nil { - log.Printf("stderr:\n%v\nstdout:\n%s\n", err, string(output)) - return "", fmt.Errorf("error getting the status of the resource(s): %v\nstdout: %s", err, string(output)) - } - - return string(output), nil -} - -func deleteResources(solutionPath string) error { - log.Println("======Deleting the resources...======") - output, err := exec.Command("kubectl", "delete", "-f", solutionPath, "--wait").CombinedOutput() - if err != nil { - log.Printf("stderr:\n%v\nstdout:\n%s\n", err, string(output)) - err = fmt.Errorf("error deleting resources: %v\nstdout: %s", err, string(output)) - if isNotFoundErrorOnly(err) { - log.Println(err) - log.Println("======Finished deleting the resources======") - return nil - } - return fmt.Errorf("error deleting resources: %v\nstdout: %s", err, string(output)) - } - log.Printf("%s\n", output) - log.Println("======Successfully deleted the resources======") - return nil -} - -func isNotFoundErrorOnly(err error) bool { - numErrors := strings.Count(err.Error(), "Error from server") - numNotFoundErrors := strings.Count(err.Error(), "Error from server (NotFound)") - return numErrors == numNotFoundErrors -} - -func resetKptSetters(solutionPath string, originalValues map[string]string) error { - log.Println("======Resetting the kpt setters...======") - for key, value := range originalValues { - setByUser := "PLACEHOLDER" - if !strings.HasPrefix(value, "$") { - setByUser = "package-default" - } - - output, err := exec.Command("kpt", "cfg", "set", solutionPath, key, value, "--set-by", setByUser).CombinedOutput() - if err != nil { - log.Printf("stderr:\n%v\nstdout:\n%s\n", err, string(output)) - return fmt.Errorf("error setting setter '%s' back to the original value '%s': %v\nstdout: %s", key, value, err, string(output)) - } - log.Printf("%s\n", string(output)) - } - log.Println("======Successfully reset the kpt setters======") - return nil -} - -func concatErrors(msg string, errs ...error) error { - errToReturn 
:= errors.New(msg) - for _, err := range errs { - if err == nil { - continue - } - errToReturn = fmt.Errorf("%v:%v", errToReturn, err) - } - return errToReturn -} diff --git a/config-connector/tests/ccs-test/go.mod b/config-connector/tests/ccs-test/go.mod deleted file mode 100644 index 4b463a7c038..00000000000 --- a/config-connector/tests/ccs-test/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -go 1.13 - -require ( - github.com/ghodss/yaml v1.0.0 - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/spf13/cobra v1.0.0 -) - -module github.com/GoogleCloudPlatform/cloud-foundation-toolkit/config-connector/tests/ccs-test diff --git a/config-connector/tests/ccs-test/go.sum b/config-connector/tests/ccs-test/go.sum deleted file mode 100644 index c21d6d34ef8..00000000000 --- a/config-connector/tests/ccs-test/go.sum +++ /dev/null @@ -1,133 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd 
v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= 
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.2/go.mod 
h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/config-connector/tests/ccs-test/main.go b/config-connector/tests/ccs-test/main.go deleted file mode 100644 index 2ba5f7ca198..00000000000 --- a/config-connector/tests/ccs-test/main.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "github.com/golang/glog" - "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/config-connector/tests/ccs-test/cmd" -) - -func main() { - defer glog.Flush() - - cmd.Execute() -} diff --git a/config-connector/tests/ccs-test/util/util.go b/config-connector/tests/ccs-test/util/util.go deleted file mode 100644 index 14a96d952f5..00000000000 --- a/config-connector/tests/ccs-test/util/util.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "crypto/rand" - "encoding/base32" - "fmt" - "strings" -) - -// Generates a random ID containing [2-7][a-z] (base32 alphabets) of length 4. -func GenerateRandomizedSuffix() (string, error) { - // 3 bytes will generate base32 encoded string of length 5. - b := make([]byte, 3) - _, err := rand.Read(b) - if err != nil { - return "", fmt.Errorf("error generating random bytes: %v", err) - } - - return strings.ToLower(base32.StdEncoding.EncodeToString(b)[0:4]), nil -} \ No newline at end of file diff --git a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/.gitignore b/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/.gitignore deleted file mode 100644 index e256a31e00a..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# OSX leaves these everywhere on SMB shares -._* - -# Eclipse files -.classpath -.project -.settings/** - -# Emacs save files -*~ - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Go test binaries -*.test diff --git a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/.travis.yml b/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6edc010..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/LICENSE b/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/LICENSE deleted file mode 100644 index 7805d36de73..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is 
hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/README.md b/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/README.md deleted file mode 100644 index 0200f75b4d1..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# YAML marshaling and unmarshaling support for Go - -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) - -## Introduction - -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. - -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). 
- -## Compatibility - -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). - -## Caveats - -**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: - -``` -BAD: - exampleKey: !!binary gIGC - -GOOD: - exampleKey: gIGC -... and decode the base64 data in your code. -``` - -**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. - -## Installation and usage - -To install, run: - -``` -$ go get github.com/ghodss/yaml -``` - -And import using: - -``` -import "github.com/ghodss/yaml" -``` - -Usage is very similar to the JSON library: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` -} - -func main() { - // Marshal a Person struct to YAML. - p := Person{"John", 30} - y, err := yaml.Marshal(p) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - - // Unmarshal the YAML back into a Person struct. 
- var p2 Person - err = yaml.Unmarshal(y, &p2) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(p2) - /* Output: - {John 30} - */ -} -``` - -`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -func main() { - j := []byte(`{"name": "John", "age": 30}`) - y, err := yaml.JSONToYAML(j) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - name: John - age: 30 - */ - j2, err := yaml.YAMLToJSON(y) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(j2)) - /* Output: - {"age":30,"name":"John"} - */ -} -``` diff --git a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/fields.go b/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/fields.go deleted file mode 100644 index 58600740266..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. 
- if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// A field represents a single field found in a struct. -type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. 
-type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. 
- fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
-func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'Åŋ' Latin small letter long s -// * k maps to K and to U+212A 'â„Ē' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. 
-func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. 
-func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. 
-func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/yaml.go b/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/yaml.go deleted file mode 100644 index 4fb4054a8b7..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/ghodss/yaml/yaml.go +++ /dev/null @@ -1,277 +0,0 @@ -package yaml - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - - "gopkg.in/yaml.v2" -) - -// Marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil -} - -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { - vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) - } - - err = json.Unmarshal(j, o) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) - } - - return nil -} - -// Convert JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. 
go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - return yaml.Marshal(jsonObj) -} - -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. -// -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. - return json.Marshal(jsonObj) -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). 
We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if ju != nil || tu != nil { - jsonTarget = nil - } else { - jsonTarget = &pv - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. - switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. - // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. 
- s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. - jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. - jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. 
- - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. - if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. - var s string - switch typedVal := typedYAMLObj.(type) { - case int: - s = strconv.FormatInt(int64(typedVal), 10) - case int64: - s = strconv.FormatInt(typedVal, 10) - case float64: - s = strconv.FormatFloat(typedVal, 'g', -1, 32) - case uint64: - s = strconv.FormatUint(typedVal, 10) - case bool: - if typedVal { - s = "true" - } else { - s = "false" - } - } - if len(s) > 0 { - yamlObj = interface{}(s) - } - } - return yamlObj, nil - } - - return nil, nil -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/golang/glog/LICENSE b/config-connector/tests/ccs-test/vendor/github.com/golang/glog/LICENSE deleted file mode 100644 index 37ec93a14fd..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/golang/glog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. 
- -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file 
distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
- -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/config-connector/tests/ccs-test/vendor/github.com/golang/glog/README b/config-connector/tests/ccs-test/vendor/github.com/golang/glog/README deleted file mode 100644 index 387b4eb6890..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/golang/glog/README +++ /dev/null @@ -1,44 +0,0 @@ -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - https://github.com/google/glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. 
- -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/config-connector/tests/ccs-test/vendor/github.com/golang/glog/glog.go b/config-connector/tests/ccs-test/vendor/github.com/golang/glog/glog.go deleted file mode 100644 index 54bd7afdcab..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/golang/glog/glog.go +++ /dev/null @@ -1,1180 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. -// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. 
For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package glog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? 
- if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. 
-func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. 
-func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. 
-func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. 
-type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. 
- file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. 
-func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. 
The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) 
- l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. 
- l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. 
-func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. 
-type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. 
- for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. -func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). 
-func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". - if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. 
-// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } -// or -// glog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) 
- } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
-func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) 
-} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) 
-} diff --git a/config-connector/tests/ccs-test/vendor/github.com/golang/glog/glog_file.go b/config-connector/tests/ccs-test/vendor/github.com/golang/glog/glog_file.go deleted file mode 100644 index 65075d28111..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/golang/glog/glog_file.go +++ /dev/null @@ -1,124 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package glog - -import ( - "errors" - "flag" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. 
-var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - -func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. 
-func create(tag string, t time.Time) (f *os.File, filename string, err error) { - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/LICENSE b/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/LICENSE deleted file mode 100644 index 5f0d1fb6a7b..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Alan Shreve - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/README.md b/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d1774f..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. 
- -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/trap_others.go deleted file mode 100644 index 9d2d8a4bab9..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package mousetrap - -// StartedByExplorer returns true if the program was invoked by the user -// double-clicking on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -// -// On non-Windows platforms, it always returns false. 
-func StartedByExplorer() bool { - return false -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/trap_windows.go deleted file mode 100644 index 336142a5e3e..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows -// +build !go1.4 - -package mousetrap - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return - } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return - } - - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return - } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: 
%v", e1) - return - } - } -} - -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - ppid, err := getppid() - if err != nil { - return false - } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c3c3..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable 
from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/.gitignore b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index c7b459e4dd0..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,39 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore -# swap -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - -*.exe -cobra.test -bin - -.idea/ -*.iml diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/.mailmap b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/.mailmap deleted file mode 100644 index 94ec53068aa..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -Steve Francia -Bjørn Erik Pedersen -Fabiano Franz diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/.travis.yml b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index a9bd4e54785..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: go - -stages: - - diff - 
- test - - build - -go: - - 1.12.x - - 1.13.x - - tip - -before_install: - - go get -u github.com/kyoh86/richgo - - go get -u github.com/mitchellh/gox - -matrix: - allow_failures: - - go: tip - include: - - stage: diff - go: 1.13.x - script: make fmt - - stage: build - go: 1.13.x - script: make cobra_generator - -script: - - make test diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/LICENSE.txt b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e2665e..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/Makefile b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/Makefile deleted file mode 100644 index e9740d1e175..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -BIN="./bin" -SRC=$(shell find . 
-name "*.go") - -ifeq (, $(shell which richgo)) -$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo") -endif - -.PHONY: fmt vet test cobra_generator install_deps clean - -default: all - -all: fmt vet test cobra_generator - -fmt: - $(info ******************** checking formatting ********************) - @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) - -test: install_deps vet - $(info ******************** running tests ********************) - richgo test -v ./... - -cobra_generator: install_deps - $(info ******************** building generator ********************) - mkdir -p $(BIN) - make -C cobra all - -install_deps: - $(info ******************** downloading dependencies ********************) - go get -v ./... - -vet: - $(info ******************** vetting ********************) - go vet ./... - -clean: - rm -rf $(BIN) diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/README.md b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index 9d79934260f..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,770 +0,0 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) - -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
- -Many of the most widely used Go projects are built using Cobra, such as: -[Kubernetes](http://kubernetes.io/), -[Hugo](http://gohugo.io), -[rkt](https://github.com/coreos/rkt), -[etcd](https://github.com/coreos/etcd), -[Moby (former Docker)](https://github.com/moby/moby), -[Docker (distribution)](https://github.com/docker/distribution), -[OpenShift](https://www.openshift.com/), -[Delve](https://github.com/derekparker/delve), -[GopherJS](http://www.gopherjs.org/), -[CockroachDB](http://www.cockroachlabs.com/), -[Bleve](http://www.blevesearch.com/), -[ProjectAtomic (enterprise)](http://www.projectatomic.io/), -[Giant Swarm's gsctl](https://github.com/giantswarm/gsctl), -[Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack), -[rclone](http://rclone.org/), -[nehm](https://github.com/bogem/nehm), -[Pouch](https://github.com/alibaba/pouch), -[Istio](https://istio.io), -[Prototool](https://github.com/uber/prototool), -[mattermost-server](https://github.com/mattermost/mattermost-server), -[Gardener](https://github.com/gardener/gardenctl), -[Linkerd](https://linkerd.io/), -[Github CLI](https://github.com/cli/cli) -etc. 
- -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) - -# Table of Contents - -- [Overview](#overview) -- [Concepts](#concepts) - * [Commands](#commands) - * [Flags](#flags) -- [Installing](#installing) -- [Getting Started](#getting-started) - * [Using the Cobra Generator](#using-the-cobra-generator) - * [Using the Cobra Library](#using-the-cobra-library) - * [Working with Flags](#working-with-flags) - * [Positional and Custom Arguments](#positional-and-custom-arguments) - * [Example](#example) - * [Help Command](#help-command) - * [Usage Message](#usage-message) - * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](#generating-documentation-for-your-command) - * [Generating bash completions](#generating-bash-completions) - * [Generating zsh completions](#generating-zsh-completions) -- [Contributing](#contributing) -- [License](#license) - -# Overview - -Cobra is a library providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. - -Cobra is also an application that will generate your application scaffolding to rapidly -develop a Cobra-based application. - -Cobra provides: -* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. -* Fully POSIX-compliant flags (including short & long versions) -* Nested subcommands -* Global, local and cascading flags -* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` -* Intelligent suggestions (`app srver`... did you mean `app server`?) 
-* Automatic help generation for commands and flags -* Automatic help flag recognition of `-h`, `--help`, etc. -* Automatically generated bash autocomplete for your application -* Automatically generated man pages for your application -* Command aliases so you can change things without breaking them -* The flexibility to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps - -# Concepts - -Cobra is built on a structure of commands, arguments & flags. - -**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. - -The best applications will read like sentences when used. Users will know how -to use the application because they will natively understand how to use it. - -The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE.` - or -`APPNAME COMMAND ARG --FLAG` - -A few good real world examples may better illustrate this point. - -In the following example, 'server' is a command, and 'port' is a flag: - - hugo server --port=1313 - -In this command we are telling Git to clone the url bare. - - git clone URL --bare - -## Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above, 'server' is the command. - -[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) - -## Flags - -A flag is a way to modify the behavior of a command. Cobra supports -fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above, 'port' is the flag. 
- -Flag functionality is provided by the [pflag -library](https://github.com/spf13/pflag), a fork of the flag standard library -which maintains the same interface while adding POSIX compliance. - -# Installing -Using Cobra is easy. First, use `go get` to install the latest version -of the library. This command will install the `cobra` generator executable -along with the library and its dependencies: - - go get -u github.com/spf13/cobra/cobra - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Getting Started - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. 
- Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. - -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - homedir "github.com/mitchellh/go-homedir" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command. -func Execute() error { - return rootCmd.Execute() -} - -func init() { - cobra.OnInitialize(initConfig) - - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") - rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") - - rootCmd.AddCommand(addCmd) - rootCmd.AddCommand(initCmd) -} - -func er(msg interface{}) { - fmt.Println("Error:", msg) - os.Exit(1) -} - -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. 
- home, err := homedir.Dir() - if err != nil { - er(err) - } - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigName(".cobra") - } - - viper.AutomaticEnv() - - if err := viper.ReadInConfig(); err == nil { - fmt.Println("Using config file:", viper.ConfigFileUsed()) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. 
For -global flags, assign a flag as a persistent flag on the root. - -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default Cobra only parses local flags on the target command, any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example the persistent flag `author` is bound with `viper`. -**Note**, that the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field -of `Command`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. 
-- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. - -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires a color argument") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. 
-For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Echo: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra help - - Cobra is a CLI library for Go that empowers applications. 
- This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra --invalid - Error: unknown flag: --invalid - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. 
- -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: 
[arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. in the following formats: - -- [Markdown](doc/md_docs.md) -- [ReStructured Text](doc/rest_docs.md) -- [Man Page](doc/man_docs.md) - -## Generating bash completions - -Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). - -## Generating zsh completions - -Cobra can generate zsh-completion file. 
Read more about it in -[Zsh Completions](zsh_completions.md). - -# Contributing - -1. Fork it -2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) -3. Create your feature branch (`git checkout -b my-new-feature`) -4. Make changes and add them (`git add .`) -5. Commit your changes (`git commit -m 'Add some feature'`) -6. Push to the branch (`git push origin my-new-feature`) -7. Create new pull request - -# License - -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/args.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/args.go deleted file mode 100644 index 70e9b262912..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/args.go +++ /dev/null @@ -1,109 +0,0 @@ -package cobra - -import ( - "fmt" - "strings" -) - -type PositionalArgs func(cmd *Command, args []string) error - -// Legacy arg validation has the following behaviour: -// - root commands with no subcommands can take arbitrary arguments -// - root commands with subcommands will do subcommand validity checking -// - subcommands will always accept arbitrary arguments -func legacyArgs(cmd *Command, args []string) error { - // no subcommand, always take args - if !cmd.HasSubCommands() { - return nil - } - - // root command with subcommands, do subcommand checking. - if !cmd.HasParent() && len(args) > 0 { - return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - return nil -} - -// NoArgs returns an error if any args are included. -func NoArgs(cmd *Command, args []string) error { - if len(args) > 0 { - return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) - } - return nil -} - -// OnlyValidArgs returns an error if any args are not in the list of ValidArgs. 
-func OnlyValidArgs(cmd *Command, args []string) error { - if len(cmd.ValidArgs) > 0 { - // Remove any description that may be included in ValidArgs. - // A description is following a tab character. - var validArgs []string - for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) - } - - for _, v := range args { - if !stringInSlice(v, validArgs) { - return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - } - } - return nil -} - -// ArbitraryArgs never returns an error. -func ArbitraryArgs(cmd *Command, args []string) error { - return nil -} - -// MinimumNArgs returns an error if there is not at least N args. -func MinimumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < n { - return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) - } - return nil - } -} - -// MaximumNArgs returns an error if there are more than N args. -func MaximumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) > n { - return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// ExactArgs returns an error if there are not exactly n args. -func ExactArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) != n { - return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// ExactValidArgs returns an error if -// there are not exactly N positional args OR -// there are any positional args that are not in the `ValidArgs` field of `Command` -func ExactValidArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if err := ExactArgs(n)(cmd, args); err != nil { - return err - } - return OnlyValidArgs(cmd, args) - } -} - -// RangeArgs returns an error if the number of args is not within the expected range. 
-func RangeArgs(min int, max int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < min || len(args) > max { - return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) - } - return nil - } -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/bash_completions.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/bash_completions.go deleted file mode 100644 index 1e27188c3d2..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/bash_completions.go +++ /dev/null @@ -1,641 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "sort" - "strings" - - "github.com/spf13/pflag" -) - -// Annotations for Bash completion. -const ( - BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" - BashCompCustom = "cobra_annotation_bash_completion_custom" - BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" - BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" -) - -func writePreamble(buf *bytes.Buffer, name string) { - buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) - buf.WriteString(fmt.Sprintf(` -__%[1]s_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Homebrew on Macs have version 1.3 of bash-completion which doesn't include -# _init_completion. This is a very minimal version of that function. 
-__%[1]s_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -__%[1]s_index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__%[1]s_contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__%[1]s_handle_go_custom_completion() -{ - __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}" - - local out requestComp lastParam lastChar comp directive args - - # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases - args=("${words[@]:1}") - requestComp="${words[0]} %[2]s ${args[*]}" - - lastParam=${words[$((${#words[@]}-1))]} - lastChar=${lastParam:$((${#lastParam}-1)):1} - __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}" - - if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go method. - __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter" - requestComp="${requestComp} \"\"" - fi - - __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}" - # Use eval to handle any environment variables and such - out=$(eval "${requestComp}" 2>/dev/null) - - # Extract the directive integer at the very end of the output following a colon (:) - directive=${out##*:} - # Remove the directive - out=${out%%:*} - if [ "${directive}" = "${out}" ]; then - # There is not directive specified - directive=0 - fi - __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}" - __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}" - - if [ $((directive & %[3]d)) -ne 0 ]; then - # Error code. No completion. 
- __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code" - return - else - if [ $((directive & %[4]d)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "${FUNCNAME[0]}: activating no space" - compopt -o nospace - fi - fi - if [ $((directive & %[5]d)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "${FUNCNAME[0]}: activating no file completion" - compopt +o default - fi - fi - - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${out[*]}" -- "$cur") - fi -} - -__%[1]s_handle_reply() -{ - __%[1]s_debug "${FUNCNAME[0]}" - local comp - case $cur in - -*) - if [[ $(type -t compopt) = "builtin" ]]; then - compopt -o nospace - fi - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${allflags[*]}" -- "$cur") - if [[ $(type -t compopt) = "builtin" ]]; then - [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace - fi - - # complete after --flag=abc - if [[ $cur == *=* ]]; then - if [[ $(type -t compopt) = "builtin" ]]; then - compopt +o nospace - fi - - local index flag - flag="${cur%%=*}" - __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}" - COMPREPLY=() - if [[ ${index} -ge 0 ]]; then - PREFIX="" - cur="${cur#*=}" - ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then - # zsh completion needs --flag= prefix - eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" - fi - fi - fi - return 0; - ;; - esac - - # check if we are handling a flag with special work handling - local index - __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" ]]; then - return - fi - - local 
completions - completions=("${commands[@]}") - if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions=("${must_have_one_noun[@]}") - elif [[ -n "${has_completion_function}" ]]; then - # if a go completion function is provided, defer to that function - completions=() - __%[1]s_handle_go_custom_completion - fi - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions+=("${must_have_one_flag[@]}") - fi - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${completions[*]}" -- "$cur") - - if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${noun_aliases[*]}" -- "$cur") - fi - - if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - if declare -F __%[1]s_custom_func >/dev/null; then - # try command name qualified custom func - __%[1]s_custom_func - else - # otherwise fall back to unqualified for compatibility - declare -F __custom_func >/dev/null && __custom_func - fi - fi - - # available in bash-completion >= 2, not always present on macOS - if declare -F __ltrim_colon_completions >/dev/null; then - __ltrim_colon_completions "$cur" - fi - - # If there is only 1 completion and it is a flag with an = it will be completed - # but we don't want a space after the = - if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then - compopt -o nospace - fi -} - -# The arguments should be in the form "ext1|ext2|extn" -__%[1]s_handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__%[1]s_handle_subdirs_in_dir_flag() -{ - local dir="$1" - pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return -} - -__%[1]s_handle_flag() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - local flagvalue - # if the 
word contained an = - if [[ ${words[c]} == *"="* ]]; then - flagvalue=${flagname#*=} # take in as flagvalue after the = - flagname=${flagname%%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}" - if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # if you set a flag which only applies to this command, don't show subcommands - if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then - commands=() - fi - - # keep flag value with flagname as flaghash - # flaghash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then - if [ -n "${flagvalue}" ] ; then - flaghash[${flagname}]=${flagvalue} - elif [ -n "${words[ $((c+1)) ]}" ] ; then - flaghash[${flagname}]=${words[ $((c+1)) ]} - else - flaghash[${flagname}]="true" # pad "true" for bool flag - fi - fi - - # skip the argument to a two word flag - if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then - __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - c=$((c+1)) - -} - -__%[1]s_handle_noun() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__%[1]s_handle_command() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]//:/__}" - else - if [[ $c -eq 0 ]]; then - next_command="_%[1]s_root_command" - 
else - next_command="_${words[c]//:/__}" - fi - fi - c=$((c+1)) - __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}" - declare -F "$next_command" >/dev/null && $next_command -} - -__%[1]s_handle_word() -{ - if [[ $c -ge $cword ]]; then - __%[1]s_handle_reply - return - fi - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __%[1]s_handle_flag - elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then - __%[1]s_handle_command - elif [[ $c -eq 0 ]]; then - __%[1]s_handle_command - elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then - # aliashash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then - words[c]=${aliashash[${words[c]}]} - __%[1]s_handle_command - else - __%[1]s_handle_noun - fi - else - __%[1]s_handle_noun - fi - __%[1]s_handle_word -} - -`, name, ShellCompNoDescRequestCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp)) -} - -func writePostscript(buf *bytes.Buffer, name string) { - name = strings.Replace(name, ":", "__", -1) - buf.WriteString(fmt.Sprintf("__start_%s()\n", name)) - buf.WriteString(fmt.Sprintf(`{ - local cur prev words cword - declare -A flaghash 2>/dev/null || : - declare -A aliashash 2>/dev/null || : - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -s || return - else - __%[1]s_init_completion -n "=" || return - fi - - local c=0 - local flags=() - local two_word_flags=() - local local_nonpersistent_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("%[1]s") - local must_have_one_flag=() - local must_have_one_noun=() - local has_completion_function - local last_command - local nouns=() - - __%[1]s_handle_word -} - -`, name)) - buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_%s %s -else - complete -o default -o 
nospace -F __start_%s %s -fi - -`, name, name, name, name)) - buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n") -} - -func writeCommands(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" commands=()\n") - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { - continue - } - buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name())) - writeCmdAliases(buf, c) - } - buf.WriteString("\n") -} - -func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) { - for key, value := range annotations { - switch key { - case BashCompFilenameExt: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - var ext string - if len(value) > 0 { - ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|") - } else { - ext = "_filedir" - } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) - case BashCompCustom: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - if len(value) > 0 { - handlers := strings.Join(value, "; ") - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) - } else { - buf.WriteString(" flags_completion+=(:)\n") - } - case BashCompSubdirsInDir: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - var ext string - if len(value) == 1 { - ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0] - } else { - ext = "_filedir -d" - } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) - } - } -} - -func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { - name := flag.Shorthand - format := " " - if len(flag.NoOptDefVal) == 0 { - format += "two_word_" - } - format += "flags+=(\"-%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) - writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) -} - -func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { - name := flag.Name - format := " 
flags+=(\"--%s" - if len(flag.NoOptDefVal) == 0 { - format += "=" - } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, name)) - if len(flag.NoOptDefVal) == 0 { - format = " two_word_flags+=(\"--%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) - } - writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) -} - -func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) { - name := flag.Name - format := " local_nonpersistent_flags+=(\"--%s" - if len(flag.NoOptDefVal) == 0 { - format += "=" - } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, name)) -} - -// Setup annotations for go completions for registered flags -func prepareCustomAnnotationsForFlags(cmd *Command) { - for flag := range flagCompletionFunctions { - // Make sure the completion script calls the __*_go_custom_completion function for - // every registered flag. We need to do this here (and not when the flag was registered - // for completion) so that we can know the root command name for the prefix - // of ___go_custom_completion - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())} - } -} - -func writeFlags(buf *bytes.Buffer, cmd *Command) { - prepareCustomAnnotationsForFlags(cmd) - buf.WriteString(` flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - -`) - localNonPersistentFlags := cmd.LocalNonPersistentFlags() - cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - writeFlag(buf, flag, cmd) - if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag, cmd) - } - if localNonPersistentFlags.Lookup(flag.Name) != nil { - writeLocalNonPersistentFlag(buf, flag) - } - }) - cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - writeFlag(buf, flag, cmd) - if len(flag.Shorthand) > 0 { - 
writeShortFlag(buf, flag, cmd) - } - }) - - buf.WriteString("\n") -} - -func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_flag=()\n") - flags := cmd.NonInheritedFlags() - flags.VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)) - } - } - } - }) -} - -func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_noun=()\n") - sort.Sort(sort.StringSlice(cmd.ValidArgs)) - for _, value := range cmd.ValidArgs { - // Remove any description that may be included following a tab character. - // Descriptions are not supported by bash completion. 
- value = strings.Split(value, "\t")[0] - buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) - } - if cmd.ValidArgsFunction != nil { - buf.WriteString(" has_completion_function=1\n") - } -} - -func writeCmdAliases(buf *bytes.Buffer, cmd *Command) { - if len(cmd.Aliases) == 0 { - return - } - - sort.Sort(sort.StringSlice(cmd.Aliases)) - - buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) - for _, value := range cmd.Aliases { - buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value)) - buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) - } - buf.WriteString(` fi`) - buf.WriteString("\n") -} -func writeArgAliases(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" noun_aliases=()\n") - sort.Sort(sort.StringSlice(cmd.ArgAliases)) - for _, value := range cmd.ArgAliases { - buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value)) - } -} - -func gen(buf *bytes.Buffer, cmd *Command) { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { - continue - } - gen(buf, c) - } - commandName := cmd.CommandPath() - commandName = strings.Replace(commandName, " ", "_", -1) - commandName = strings.Replace(commandName, ":", "__", -1) - - if cmd.Root() == cmd { - buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName)) - } else { - buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) - } - - buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName)) - buf.WriteString("\n") - buf.WriteString(" command_aliases=()\n") - buf.WriteString("\n") - - writeCommands(buf, cmd) - writeFlags(buf, cmd) - writeRequiredFlag(buf, cmd) - writeRequiredNouns(buf, cmd) - writeArgAliases(buf, cmd) - buf.WriteString("}\n\n") -} - -// GenBashCompletion generates bash completion file and writes to the passed writer. 
-func (c *Command) GenBashCompletion(w io.Writer) error { - buf := new(bytes.Buffer) - writePreamble(buf, c.Name()) - if len(c.BashCompletionFunction) > 0 { - buf.WriteString(c.BashCompletionFunction + "\n") - } - gen(buf, c) - writePostscript(buf, c.Name()) - - _, err := buf.WriteTo(w) - return err -} - -func nonCompletableFlag(flag *pflag.Flag) bool { - return flag.Hidden || len(flag.Deprecated) > 0 -} - -// GenBashCompletionFile generates bash completion file. -func (c *Command) GenBashCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenBashCompletion(outFile) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/bash_completions.md b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index e61a3a6546b..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,383 +0,0 @@ -# Generating Bash Completions For Your Own cobra.Command - -If you are using the generator you can create a completion command by running - -```bash -cobra add completion -``` - -Update the help text show how to install the bash_completion Linux show here [Kubectl docs show mac options](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion) - -Writing the shell script to stdout allows the most flexible use. - -```go -// completionCmd represents the completion command -var completionCmd = &cobra.Command{ - Use: "completion", - Short: "Generates bash completion scripts", - Long: `To load completion run - -. <(bitbucket completion) - -To configure your bash shell to load completions for each session add to your bashrc - -# ~/.bashrc or ~/.profile -. 
<(bitbucket completion) -`, - Run: func(cmd *cobra.Command, args []string) { - rootCmd.GenBashCompletion(os.Stdout); - }, -} -``` - -**Note:** The cobra generator may include messages printed to stdout for example if the config file is loaded, this will break the auto complete script - - -## Example from kubectl - -Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: - -```go -package main - -import ( - "io/ioutil" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/util" -) - -func main() { - kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - kubectl.GenBashCompletionFile("out.sh") -} -``` - -`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. - -## Have the completions code complete your 'nouns' - -### Static completion of nouns - -This method allows you to provide a pre-defined list of completion choices for your nouns using the `validArgs` field. -For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: - -```go -validArgs []string = { "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the "ValidArgs" on the "get" subcommand. 
Doing so will give results like - -```bash -# kubectl get [tab][tab] -node pod replicationcontroller service -``` - -### Plural form and shortcuts for nouns - -If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -# kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns -in this example again instead of the replication controllers. - -### Dynamic completion of nouns - -In some cases it is not possible to provide a list of possible completions in advance. Instead, the list of completions must be determined at execution-time. Cobra provides two ways of defining such dynamic completion of nouns. Note that both these methods can be used along-side each other as long as they are not both used for the same command. - -**Note**: *Custom Completions written in Go* will automatically work for other shell-completion scripts (e.g., Fish shell), while *Custom Completions written in Bash* will only work for Bash shell-completion. It is therefore recommended to use *Custom Completions written in Go*. - -#### 1. Custom completions of nouns written in Go - -In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both. 
-Simplified code from `helm status` looks like: - -```go -cmd := &cobra.Command{ - Use: "status RELEASE_NAME", - Short: "Display the status of the named release", - Long: status_long, - RunE: func(cmd *cobra.Command, args []string) { - RunGet(args[0]) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 0 { - return nil, cobra.ShellCompDirectiveNoFileComp - } - return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp - }, -} -``` -Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster. -Notice we put the `ValidArgsFunction` on the `status` subcommand. Let's assume the Helm releases on the cluster are: `harbor`, `notary`, `rook` and `thanos` then this dynamic completion will give results like - -```bash -# helm status [tab][tab] -harbor notary rook thanos -``` -You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields allowing to control some shell completion behaviors for your particular completion. You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp` -```go -// Indicates an error occurred and completions should be ignored. -ShellCompDirectiveError -// Indicates that the shell should not add a space after the completion, -// even if there is a single completion provided. -ShellCompDirectiveNoSpace -// Indicates that the shell should not provide file completion even when -// no completion is provided. -// This currently does not work for zsh or bash < 4 -ShellCompDirectiveNoFileComp -// Indicates that the shell will perform its default behavior after completions -// have been provided (this implies !ShellCompDirectiveNoSpace && !ShellCompDirectiveNoFileComp). 
-ShellCompDirectiveDefault -``` - -When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. - -##### Debugging - -Cobra achieves dynamic completions written in Go through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly: -```bash -# helm __complete status har -harbor -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** If the noun to complete is empty, you must pass an empty parameter to the `__complete` command: -```bash -# helm __complete status "" -harbor -notary -rook -thanos -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code: -```go -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and optionally prints to stderr. -cobra.CompDebug(msg string, printToStdErr bool) { -cobra.CompDebugln(msg string, printToStdErr bool) - -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and to stderr. -cobra.CompError(msg string) -cobra.CompErrorln(msg string) -``` -***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned above. 
- -#### 2. Custom completions of nouns written in Bash - -This method allows you to inject bash functions into the completion script. Those bash functions are responsible for providing the completion choices for your own completions. - -Some more actual code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__kubectl_custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. 
In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -## Mark flags as required - -Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -# kubectl exec [tab][tab][tab] --c --container= -p --pod= -``` - -# Specify valid filename extensions for flags that take a filename - -In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. - -```go - annotations := []string{"json", "yaml", "yml"} - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = annotations - - flag := &pflag.Flag{ - Name: "filename", - Shorthand: "f", - Usage: usage, - Value: value, - DefValue: value.String(), - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -Now when you run a command with this filename flag you'll get something like - -```bash -# kubectl create -f -test/ example/ rpmbuild/ -hello.yml test.json -``` - -So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. - -# Specify custom flag completion - -As for nouns, Cobra provides two ways of defining dynamic completion of flags. Note that both these methods can be used along-side each other as long as they are not both used for the same flag. - -**Note**: *Custom Completions written in Go* will automatically work for other shell-completion scripts (e.g., Fish shell), while *Custom Completions written in Bash* will only work for Bash shell-completion. 
It is therefore recommended to use *Custom Completions written in Go*. - -## 1. Custom completions of flags written in Go - -To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function in the following manner: - -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault -}) -``` -Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so: - -```bash -# helm status --output [tab][tab] -json table yaml -``` - -### Debugging - -You can also easily debug your Go completion code for flags: -```bash -# helm __complete status --output "" -json -table -yaml -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned in the above section. - -## 2. Custom completions of flags written in Bash - -Alternatively, you can use bash code for flag custom completion. 
Similar to the filename -completion and filtering using `cobra.BashCompFilenameExt`, you can specify -a custom flag completion bash function with `cobra.BashCompCustom`: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` -# Using bash aliases for commands - -You can also configure the `bash aliases` for the commands and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$) aliasname -completion firstcommand secondcommand -``` diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/cobra.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/cobra.go deleted file mode 100644 index d01becc8fa6..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/cobra.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright Š 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Commands similar to git, go tools and other modern CLI tools -// inspired by go, go-Commander, gh and subcommand - -package cobra - -import ( - "fmt" - "io" - "reflect" - "strconv" - "strings" - "text/template" - "time" - "unicode" -) - -var templateFuncs = template.FuncMap{ - "trim": strings.TrimSpace, - "trimRightSpace": trimRightSpace, - "trimTrailingWhitespaces": trimRightSpace, - "appendIfNotPresent": appendIfNotPresent, - "rpad": rpad, - "gt": Gt, - "eq": Eq, -} - -var initializers []func() - -// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing -// to automatically enable in CLI tools. -// Set this to true to enable it. -var EnablePrefixMatching = false - -// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. -// To disable sorting, set it to false. -var EnableCommandSorting = true - -// MousetrapHelpText enables an information splash screen on Windows -// if the CLI is started from explorer.exe. -// To disable the mousetrap, just set this variable to blank string (""). -// Works only on Microsoft Windows. -var MousetrapHelpText = `This is a command line tool. - -You need to open cmd.exe and run it from there. -` - -// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows -// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed. -// To disable the mousetrap, just set MousetrapHelpText to blank string (""). -// Works only on Microsoft Windows. 
-var MousetrapDisplayDuration = 5 * time.Second - -// AddTemplateFunc adds a template function that's available to Usage and Help -// template generation. -func AddTemplateFunc(name string, tmplFunc interface{}) { - templateFuncs[name] = tmplFunc -} - -// AddTemplateFuncs adds multiple template functions that are available to Usage and -// Help template generation. -func AddTemplateFuncs(tmplFuncs template.FuncMap) { - for k, v := range tmplFuncs { - templateFuncs[k] = v - } -} - -// OnInitialize sets the passed functions to be run when each command's -// Execute method is called. -func OnInitialize(y ...func()) { - initializers = append(initializers, y...) -} - -// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, -// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as -// ints and then compared. -func Gt(a interface{}, b interface{}) bool { - var left, right int64 - av := reflect.ValueOf(a) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - left = int64(av.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - left = av.Int() - case reflect.String: - left, _ = strconv.ParseInt(av.String(), 10, 64) - } - - bv := reflect.ValueOf(b) - - switch bv.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - right = int64(bv.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - right = bv.Int() - case reflect.String: - right, _ = strconv.ParseInt(bv.String(), 10, 64) - } - - return left > right -} - -// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// Eq takes two types and checks whether they are equal. 
Supported types are int and string. Unsupported types will panic. -func Eq(a interface{}, b interface{}) bool { - av := reflect.ValueOf(a) - bv := reflect.ValueOf(b) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - panic("Eq called on unsupported type") - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return av.Int() == bv.Int() - case reflect.String: - return av.String() == bv.String() - } - return false -} - -func trimRightSpace(s string) string { - return strings.TrimRightFunc(s, unicode.IsSpace) -} - -// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. -func appendIfNotPresent(s, stringToAppend string) string { - if strings.Contains(s, stringToAppend) { - return s - } - return s + " " + stringToAppend -} - -// rpad adds padding to the right of a string. -func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) -} - -// tmpl executes the given template text on data, writing the result to w. -func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) -} - -// ld compares two strings and returns the levenshtein distance between them. 
-func ld(s, t string, ignoreCase bool) int { - if ignoreCase { - s = strings.ToLower(s) - t = strings.ToLower(t) - } - d := make([][]int, len(s)+1) - for i := range d { - d[i] = make([]int, len(t)+1) - } - for i := range d { - d[i][0] = i - } - for j := range d[0] { - d[0][j] = j - } - for j := 1; j <= len(t); j++ { - for i := 1; i <= len(s); i++ { - if s[i-1] == t[j-1] { - d[i][j] = d[i-1][j-1] - } else { - min := d[i-1][j] - if d[i][j-1] < min { - min = d[i][j-1] - } - if d[i-1][j-1] < min { - min = d[i-1][j-1] - } - d[i][j] = min + 1 - } - } - - } - return d[len(s)][len(t)] -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/command.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/command.go deleted file mode 100644 index 88e6ed77d0e..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/command.go +++ /dev/null @@ -1,1634 +0,0 @@ -// Copyright Š 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. -// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. 
-package cobra - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - flag "github.com/spf13/pflag" -) - -// FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist - -// Command is just that, a command for your application. -// E.g. 'go run ...' - 'run' is the command. Cobra requires -// you to define the usage and description as part of your command -// definition to ensure usability. -type Command struct { - // Use is the one-line usage message. - Use string - - // Aliases is an array of aliases that can be used instead of the first word in Use. - Aliases []string - - // SuggestFor is an array of command names for which this command will be suggested - - // similar to aliases but only suggests. - SuggestFor []string - - // Short is the short description shown in the 'help' output. - Short string - - // Long is the long message shown in the 'help ' output. - Long string - - // Example is examples of how to use the command. - Example string - - // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions - ValidArgs []string - // ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion. - // It is a dynamic version of using ValidArgs. - // Only one of ValidArgs and ValidArgsFunction can be used for a command. - ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - - // Expected arguments - Args PositionalArgs - - // ArgAliases is List of aliases for ValidArgs. - // These are not suggested to the user in the bash completion, - // but accepted if entered manually. - ArgAliases []string - - // BashCompletionFunction is custom functions used by the bash autocompletion generator. - BashCompletionFunction string - - // Deprecated defines, if this command is deprecated and should print this string when used. 
- Deprecated string - - // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. - Hidden bool - - // Annotations are key/value pairs that can be used by applications to identify or - // group commands. - Annotations map[string]string - - // Version defines the version for this command. If this value is non-empty and the command does not - // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, - // will print content of the "Version" variable. A shorthand "v" flag will also be added if the - // command does not define one. - Version string - - // The *Run functions are executed in the following order: - // * PersistentPreRun() - // * PreRun() - // * Run() - // * PostRun() - // * PersistentPostRun() - // All functions get the same args, the arguments after the command name. - // - // PersistentPreRun: children of this command will inherit and execute. - PersistentPreRun func(cmd *Command, args []string) - // PersistentPreRunE: PersistentPreRun but returns an error. - PersistentPreRunE func(cmd *Command, args []string) error - // PreRun: children of this command will not inherit. - PreRun func(cmd *Command, args []string) - // PreRunE: PreRun but returns an error. - PreRunE func(cmd *Command, args []string) error - // Run: Typically the actual work function. Most commands will only implement this. - Run func(cmd *Command, args []string) - // RunE: Run but returns an error. - RunE func(cmd *Command, args []string) error - // PostRun: run after the Run command. - PostRun func(cmd *Command, args []string) - // PostRunE: PostRun but returns an error. - PostRunE func(cmd *Command, args []string) error - // PersistentPostRun: children of this command will inherit and execute after PostRun. - PersistentPostRun func(cmd *Command, args []string) - // PersistentPostRunE: PersistentPostRun but returns an error. 
- PersistentPostRunE func(cmd *Command, args []string) error - - // SilenceErrors is an option to quiet errors down stream. - SilenceErrors bool - - // SilenceUsage is an option to silence usage when an error occurs. - SilenceUsage bool - - // DisableFlagParsing disables the flag parsing. - // If this is true all flags will be passed to the command as arguments. - DisableFlagParsing bool - - // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") - // will be printed by generating docs for this command. - DisableAutoGenTag bool - - // DisableFlagsInUseLine will disable the addition of [flags] to the usage - // line of a command when printing help or generating docs - DisableFlagsInUseLine bool - - // DisableSuggestions disables the suggestions based on Levenshtein distance - // that go along with 'unknown command' messages. - DisableSuggestions bool - // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. - // Must be > 0. - SuggestionsMinimumDistance int - - // TraverseChildren parses flags on all parents before executing child command. - TraverseChildren bool - - // FParseErrWhitelist flag parse errors to be ignored - FParseErrWhitelist FParseErrWhitelist - - ctx context.Context - - // commands is the list of commands supported by this program. - commands []*Command - // parent is a parent command for this command. - parent *Command - // Max lengths of commands' string lengths for use in padding. - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - // commandsAreSorted defines, if command slice are sorted or not. - commandsAreSorted bool - // commandCalledAs is the name or alias value used to call this command. - commandCalledAs struct { - name string - called bool - } - - // args is actual args parsed from flags. - args []string - // flagErrorBuf contains all error messages from pflag. - flagErrorBuf *bytes.Buffer - // flags is full set of flags. 
- flags *flag.FlagSet - // pflags contains persistent flags. - pflags *flag.FlagSet - // lflags contains local flags. - lflags *flag.FlagSet - // iflags contains inherited flags. - iflags *flag.FlagSet - // parentsPflags is all persistent flags of cmd's parents. - parentsPflags *flag.FlagSet - // globNormFunc is the global normalization function - // that we can use on every pflag set and children commands - globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName - - // usageFunc is usage func defined by user. - usageFunc func(*Command) error - // usageTemplate is usage template defined by user. - usageTemplate string - // flagErrorFunc is func defined by user and it's called when the parsing of - // flags returns an error. - flagErrorFunc func(*Command, error) error - // helpTemplate is help template defined by user. - helpTemplate string - // helpFunc is help func defined by user. - helpFunc func(*Command, []string) - // helpCommand is command with usage 'help'. If it's not defined by user, - // cobra uses default help command. - helpCommand *Command - // versionTemplate is the version template defined by user. - versionTemplate string - - // inReader is a reader defined by the user that replaces stdin - inReader io.Reader - // outWriter is a writer defined by the user that replaces stdout - outWriter io.Writer - // errWriter is a writer defined by the user that replaces stderr - errWriter io.Writer -} - -// Context returns underlying command context. If command wasn't -// executed with ExecuteContext Context returns Background context. -func (c *Command) Context() context.Context { - return c.ctx -} - -// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden -// particularly useful when testing. -func (c *Command) SetArgs(a []string) { - c.args = a -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. 
// Deprecated: Use SetOut and/or SetErr instead
func (c *Command) SetOutput(output io.Writer) {
	// Sets both writers at once; SetOut/SetErr allow setting them independently.
	c.outWriter = output
	c.errWriter = output
}

// SetOut sets the destination for usage messages.
// If newOut is nil, os.Stdout is used.
func (c *Command) SetOut(newOut io.Writer) {
	c.outWriter = newOut
}

// SetErr sets the destination for error messages.
// If newErr is nil, os.Stderr is used.
func (c *Command) SetErr(newErr io.Writer) {
	c.errWriter = newErr
}

// SetIn sets the source for input data.
// If newIn is nil, os.Stdin is used.
func (c *Command) SetIn(newIn io.Reader) {
	c.inReader = newIn
}

// SetUsageFunc sets usage function. Usage can be defined by application.
func (c *Command) SetUsageFunc(f func(*Command) error) {
	c.usageFunc = f
}

// SetUsageTemplate sets usage template. Can be defined by Application.
func (c *Command) SetUsageTemplate(s string) {
	c.usageTemplate = s
}

// SetFlagErrorFunc sets a function to generate an error when flag parsing
// fails.
func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
	c.flagErrorFunc = f
}

// SetHelpFunc sets help function. Can be defined by Application.
func (c *Command) SetHelpFunc(f func(*Command, []string)) {
	c.helpFunc = f
}

// SetHelpCommand sets help command.
func (c *Command) SetHelpCommand(cmd *Command) {
	c.helpCommand = cmd
}

// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
func (c *Command) SetHelpTemplate(s string) {
	c.helpTemplate = s
}

// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
func (c *Command) SetVersionTemplate(s string) {
	c.versionTemplate = s
}

// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
// The user should not have a cyclic dependency on commands.
-func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { - c.Flags().SetNormalizeFunc(n) - c.PersistentFlags().SetNormalizeFunc(n) - c.globNormFunc = n - - for _, command := range c.commands { - command.SetGlobalNormalizationFunc(n) - } -} - -// OutOrStdout returns output to stdout. -func (c *Command) OutOrStdout() io.Writer { - return c.getOut(os.Stdout) -} - -// OutOrStderr returns output to stderr -func (c *Command) OutOrStderr() io.Writer { - return c.getOut(os.Stderr) -} - -// ErrOrStderr returns output to stderr -func (c *Command) ErrOrStderr() io.Writer { - return c.getErr(os.Stderr) -} - -// InOrStdin returns input to stdin -func (c *Command) InOrStdin() io.Reader { - return c.getIn(os.Stdin) -} - -func (c *Command) getOut(def io.Writer) io.Writer { - if c.outWriter != nil { - return c.outWriter - } - if c.HasParent() { - return c.parent.getOut(def) - } - return def -} - -func (c *Command) getErr(def io.Writer) io.Writer { - if c.errWriter != nil { - return c.errWriter - } - if c.HasParent() { - return c.parent.getErr(def) - } - return def -} - -func (c *Command) getIn(def io.Reader) io.Reader { - if c.inReader != nil { - return c.inReader - } - if c.HasParent() { - return c.parent.getIn(def) - } - return def -} - -// UsageFunc returns either the function set by SetUsageFunc for this command -// or a parent, or it returns a default usage function. -func (c *Command) UsageFunc() (f func(*Command) error) { - if c.usageFunc != nil { - return c.usageFunc - } - if c.HasParent() { - return c.Parent().UsageFunc() - } - return func(c *Command) error { - c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) - if err != nil { - c.Println(err) - } - return err - } -} - -// Usage puts out the usage for the command. -// Used when a user provides invalid input. -// Can be defined by user by overriding UsageFunc. 
func (c *Command) Usage() error {
	return c.UsageFunc()(c)
}

// HelpFunc returns either the function set by SetHelpFunc for this command
// or a parent, or it returns a function with default help behavior.
func (c *Command) HelpFunc() func(*Command, []string) {
	if c.helpFunc != nil {
		return c.helpFunc
	}
	if c.HasParent() {
		return c.Parent().HelpFunc()
	}
	return func(c *Command, a []string) {
		c.mergePersistentFlags()
		// The help should be sent to stdout
		// See https://github.com/spf13/cobra/issues/1002
		err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
		if err != nil {
			c.Println(err)
		}
	}
}

// Help puts out the help for the command.
// Used when a user calls help [command].
// Can be defined by user by overriding HelpFunc.
func (c *Command) Help() error {
	c.HelpFunc()(c, []string{})
	return nil
}

// UsageString returns usage string.
// NOTE(review): this temporarily redirects both writers into a buffer and
// restores them afterwards, so it is not safe for concurrent use — confirm
// callers never invoke it from multiple goroutines.
func (c *Command) UsageString() string {
	// Storing normal writers
	tmpOutput := c.outWriter
	tmpErr := c.errWriter

	bb := new(bytes.Buffer)
	c.outWriter = bb
	c.errWriter = bb

	c.Usage()

	// Setting things back to normal
	c.outWriter = tmpOutput
	c.errWriter = tmpErr

	return bb.String()
}

// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
// command or a parent, or it returns a function which returns the original
// error.
func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
	if c.flagErrorFunc != nil {
		return c.flagErrorFunc
	}

	if c.HasParent() {
		return c.parent.FlagErrorFunc()
	}
	return func(c *Command, err error) error {
		return err
	}
}

// minUsagePadding is the floor for usage-column padding in help output.
var minUsagePadding = 25

// UsagePadding return padding for the usage.
func (c *Command) UsagePadding() int {
	if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
		return minUsagePadding
	}
	return c.parent.commandsMaxUseLen
}

// minCommandPathPadding is the floor for command-path padding in help output.
var minCommandPathPadding = 11

// CommandPathPadding return padding for the command path.
func (c *Command) CommandPathPadding() int {
	if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
		return minCommandPathPadding
	}
	return c.parent.commandsMaxCommandPathLen
}

// minNamePadding is the floor for command-name padding in help output.
var minNamePadding = 11

// NamePadding returns padding for the name.
func (c *Command) NamePadding() int {
	if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
		return minNamePadding
	}
	return c.parent.commandsMaxNameLen
}

// UsageTemplate returns usage template for the command.
// A user-set template wins, then the nearest ancestor's, then the default below.
func (c *Command) UsageTemplate() string {
	if c.usageTemplate != "" {
		return c.usageTemplate
	}

	if c.HasParent() {
		return c.parent.UsageTemplate()
	}
	return `Usage:{{if .Runnable}}
  {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
  {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}

Aliases:
  {{.NameAndAliases}}{{end}}{{if .HasExample}}

Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}

Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
  {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}

Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}

Global Flags:
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}

Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
  {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}

Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`
}

// HelpTemplate return help template for the command.
// Resolution order matches UsageTemplate: own, ancestor, default.
func (c *Command) HelpTemplate() string {
	if c.helpTemplate != "" {
		return c.helpTemplate
	}

	if c.HasParent() {
		return c.parent.HelpTemplate()
	}
	return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}

{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
}

// VersionTemplate return version template for the command.
// Resolution order matches UsageTemplate: own, ancestor, default.
func (c *Command) VersionTemplate() string {
	if c.versionTemplate != "" {
		return c.versionTemplate
	}

	if c.HasParent() {
		return c.parent.VersionTemplate()
	}
	return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
`
}

// hasNoOptDefVal reports whether the long flag `name` in fs has a NoOptDefVal,
// i.e. may appear without a value (like a boolean flag).
func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
	flag := fs.Lookup(name)
	if flag == nil {
		return false
	}
	return flag.NoOptDefVal != ""
}

// shortHasNoOptDefVal is hasNoOptDefVal for a shorthand flag; only the first
// character of name is consulted.
func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
	if len(name) == 0 {
		return false
	}

	flag := fs.ShorthandLookup(name[:1])
	if flag == nil {
		return false
	}
	return flag.NoOptDefVal != ""
}

// stripFlags returns only the positional arguments from args, dropping flags
// and the values of flags that take a separate-argument value.
func stripFlags(args []string, c *Command) []string {
	if len(args) == 0 {
		return args
	}
	c.mergePersistentFlags()

	commands := []string{}
	flags := c.Flags()

Loop:
	for len(args) > 0 {
		s := args[0]
		args = args[1:]
		switch {
		case s == "--":
			// "--" terminates the flags
			break Loop
		case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
			// If '--flag arg' then
			// delete arg from args.
			fallthrough // (do the same as below)
		case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
			// If '-f arg' then
			// delete 'arg' from args or break the loop if len(args) <= 1.
			if len(args) <= 1 {
				break Loop
			} else {
				args = args[1:]
				continue
			}
		case s != "" && !strings.HasPrefix(s, "-"):
			commands = append(commands, s)
		}
	}

	return commands
}

// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like
// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]).
-func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) - return ret - } - } - return args -} - -func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[1] == '-') || - (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) -} - -// Find the target command given the args and command tree -// Meant to be run on the highest node. Only searches down. -func (c *Command) Find(args []string) (*Command, []string, error) { - var innerfind func(*Command, []string) (*Command, []string) - - innerfind = func(c *Command, innerArgs []string) (*Command, []string) { - argsWOflags := stripFlags(innerArgs, c) - if len(argsWOflags) == 0 { - return c, innerArgs - } - nextSubCmd := argsWOflags[0] - - cmd := c.findNext(nextSubCmd) - if cmd != nil { - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) - } - return c, innerArgs - } - - commandFound, a := innerfind(c, args) - if commandFound.Args == nil { - return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) - } - return commandFound, a, nil -} - -func (c *Command) findSuggestions(arg string) string { - if c.DisableSuggestions { - return "" - } - if c.SuggestionsMinimumDistance <= 0 { - c.SuggestionsMinimumDistance = 2 - } - suggestionsString := "" - if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" - for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) - } - } - return suggestionsString -} - -func (c *Command) findNext(next string) *Command { - matches := make([]*Command, 0) - for _, cmd := range c.commands { - if cmd.Name() == next || cmd.HasAlias(next) { - cmd.commandCalledAs.name = next - return cmd - } - if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { - matches = append(matches, cmd) - } - } - - if len(matches) == 1 { - return matches[0] - } - - return nil -} - 
// Traverse the command tree to find the command, and parse args for
// each parent.
func (c *Command) Traverse(args []string) (*Command, []string, error) {
	// flags accumulates the tokens belonging to this level's flags;
	// inFlag marks that the next token is the value of the previous flag.
	flags := []string{}
	inFlag := false

	for i, arg := range args {
		switch {
		// A long flag with a space separated value
		case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
			// TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
			inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
			flags = append(flags, arg)
			continue
		// A short flag with a space separated value
		case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
			inFlag = true
			flags = append(flags, arg)
			continue
		// The value for a flag
		case inFlag:
			inFlag = false
			flags = append(flags, arg)
			continue
		// A flag without a value, or with an `=` separated value
		case isFlagArg(arg):
			flags = append(flags, arg)
			continue
		}

		// First non-flag token: either a child command or a plain argument.
		cmd := c.findNext(arg)
		if cmd == nil {
			return c, args, nil
		}

		// Parse this level's flags before descending into the child.
		if err := c.ParseFlags(flags); err != nil {
			return nil, args, err
		}
		return cmd.Traverse(args[i+1:])
	}
	return c, args, nil
}

// SuggestionsFor provides suggestions for the typedName.
func (c *Command) SuggestionsFor(typedName string) []string {
	suggestions := []string{}
	for _, cmd := range c.commands {
		if cmd.IsAvailableCommand() {
			// Suggest by edit distance or by (case-insensitive) prefix match.
			levenshteinDistance := ld(typedName, cmd.Name(), true)
			suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
			suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
			if suggestByLevenshtein || suggestByPrefix {
				suggestions = append(suggestions, cmd.Name())
			}
			// Commands may also explicitly claim typo spellings via SuggestFor.
			for _, explicitSuggestion := range cmd.SuggestFor {
				if strings.EqualFold(typedName, explicitSuggestion) {
					suggestions = append(suggestions, cmd.Name())
				}
			}
		}
	}
	return suggestions
}

// VisitParents visits all parents of the command and invokes fn on each parent.
func (c *Command) VisitParents(fn func(*Command)) {
	if c.HasParent() {
		fn(c.Parent())
		c.Parent().VisitParents(fn)
	}
}

// Root finds root command.
func (c *Command) Root() *Command {
	if c.HasParent() {
		return c.Parent().Root()
	}
	return c
}

// ArgsLenAtDash will return the length of c.Flags().Args at the moment
// when a -- was found during args parsing.
func (c *Command) ArgsLenAtDash() int {
	return c.Flags().ArgsLenAtDash()
}

// execute parses flags from a and runs this command's full lifecycle:
// deprecation notice, help/version handling, arg validation, then the
// PersistentPreRun*/PreRun*/Run*/PostRun*/PersistentPostRun* hooks in order.
func (c *Command) execute(a []string) (err error) {
	if c == nil {
		return fmt.Errorf("Called Execute() on a nil Command")
	}

	if len(c.Deprecated) > 0 {
		c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
	}

	// initialize help and version flag at the last point possible to allow for user
	// overriding
	c.InitDefaultHelpFlag()
	c.InitDefaultVersionFlag()

	err = c.ParseFlags(a)
	if err != nil {
		return c.FlagErrorFunc()(c, err)
	}

	// If help is called, regardless of other flags, return we want help.
	// Also say we need help if the command isn't runnable.
	helpVal, err := c.Flags().GetBool("help")
	if err != nil {
		// should be impossible to get here as we always declare a help
		// flag in InitDefaultHelpFlag()
		c.Println("\"help\" flag declared as non-bool. Please correct your code")
		return err
	}

	if helpVal {
		return flag.ErrHelp
	}

	// for back-compat, only add version flag behavior if version is defined
	if c.Version != "" {
		versionVal, err := c.Flags().GetBool("version")
		if err != nil {
			c.Println("\"version\" flag declared as non-bool. Please correct your code")
			return err
		}
		if versionVal {
			err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
			if err != nil {
				c.Println(err)
			}
			return err
		}
	}

	if !c.Runnable() {
		return flag.ErrHelp
	}

	c.preRun()

	argWoFlags := c.Flags().Args()
	if c.DisableFlagParsing {
		// Flag parsing was skipped, so the raw args are the arguments.
		argWoFlags = a
	}

	if err := c.ValidateArgs(argWoFlags); err != nil {
		return err
	}

	// Only the nearest ancestor that defines a PersistentPreRun(E) hook runs it.
	for p := c; p != nil; p = p.Parent() {
		if p.PersistentPreRunE != nil {
			if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
				return err
			}
			break
		} else if p.PersistentPreRun != nil {
			p.PersistentPreRun(c, argWoFlags)
			break
		}
	}
	if c.PreRunE != nil {
		if err := c.PreRunE(c, argWoFlags); err != nil {
			return err
		}
	} else if c.PreRun != nil {
		c.PreRun(c, argWoFlags)
	}

	// Required-flag validation happens after PreRun so hooks may set flags.
	if err := c.validateRequiredFlags(); err != nil {
		return err
	}
	if c.RunE != nil {
		if err := c.RunE(c, argWoFlags); err != nil {
			return err
		}
	} else {
		c.Run(c, argWoFlags)
	}
	if c.PostRunE != nil {
		if err := c.PostRunE(c, argWoFlags); err != nil {
			return err
		}
	} else if c.PostRun != nil {
		c.PostRun(c, argWoFlags)
	}
	// Mirror of the PersistentPreRun search: nearest defining ancestor only.
	for p := c; p != nil; p = p.Parent() {
		if p.PersistentPostRunE != nil {
			if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
				return err
			}
			break
		} else if p.PersistentPostRun != nil {
			p.PersistentPostRun(c, argWoFlags)
			break
		}
	}

	return nil
}

// preRun invokes every registered package-level initializer.
func (c *Command) preRun() {
	for _, x := range initializers {
		x()
	}
}

// ExecuteContext is the same as Execute(), but sets the ctx on the command.
// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions.
func (c *Command) ExecuteContext(ctx context.Context) error {
	c.ctx = ctx
	return c.Execute()
}

// Execute uses the args (os.Args[1:] by default)
// and run through the command tree finding appropriate matches
// for commands and then corresponding flags.
func (c *Command) Execute() error {
	_, err := c.ExecuteC()
	return err
}

// ExecuteC executes the command.
func (c *Command) ExecuteC() (cmd *Command, err error) {
	if c.ctx == nil {
		c.ctx = context.Background()
	}

	// Regardless of what command execute is called on, run on Root only
	if c.HasParent() {
		return c.Root().ExecuteC()
	}

	// windows hook
	if preExecHookFn != nil {
		preExecHookFn(c)
	}

	// initialize help as the last point possible to allow for user
	// overriding
	c.InitDefaultHelpCmd()

	args := c.args

	// Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
	if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
		args = os.Args[1:]
	}

	// initialize the hidden command to be used for bash completion
	c.initCompleteCmd(args)

	var flags []string
	if c.TraverseChildren {
		cmd, flags, err = c.Traverse(args)
	} else {
		cmd, flags, err = c.Find(args)
	}
	if err != nil {
		// If found parse to a subcommand and then failed, talk about the subcommand
		if cmd != nil {
			c = cmd
		}
		if !c.SilenceErrors {
			c.Println("Error:", err.Error())
			c.Printf("Run '%v --help' for usage.\n", c.CommandPath())
		}
		return c, err
	}

	cmd.commandCalledAs.called = true
	if cmd.commandCalledAs.name == "" {
		cmd.commandCalledAs.name = cmd.Name()
	}

	// We have to pass global context to children command
	// if context is present on the parent command.
	if cmd.ctx == nil {
		cmd.ctx = c.ctx
	}

	err = cmd.execute(flags)
	if err != nil {
		// Always show help if requested, even if SilenceErrors is in
		// effect
		if err == flag.ErrHelp {
			cmd.HelpFunc()(cmd, args)
			return cmd, nil
		}

		// If root command has SilentErrors flagged,
		// all subcommands should respect it
		if !cmd.SilenceErrors && !c.SilenceErrors {
			c.Println("Error:", err.Error())
		}

		// If root command has SilentUsage flagged,
		// all subcommands should respect it
		if !cmd.SilenceUsage && !c.SilenceUsage {
			c.Println(cmd.UsageString())
		}
	}
	return cmd, err
}

// ValidateArgs runs the command's Args validator, if any, against args.
func (c *Command) ValidateArgs(args []string) error {
	if c.Args == nil {
		return nil
	}
	return c.Args(c, args)
}

// validateRequiredFlags returns an error naming every flag annotated as
// required (BashCompOneRequiredFlag) that was not set on the command line.
func (c *Command) validateRequiredFlags() error {
	flags := c.Flags()
	missingFlagNames := []string{}
	flags.VisitAll(func(pflag *flag.Flag) {
		requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
		if !found {
			return
		}
		if (requiredAnnotation[0] == "true") && !pflag.Changed {
			missingFlagNames = append(missingFlagNames, pflag.Name)
		}
	})

	if len(missingFlagNames) > 0 {
		return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
	}
	return nil
}

// InitDefaultHelpFlag adds default help flag to c.
// It is called automatically by executing the c or by calling help and usage.
// If c already has help flag, it will do nothing.
func (c *Command) InitDefaultHelpFlag() {
	c.mergePersistentFlags()
	if c.Flags().Lookup("help") == nil {
		usage := "help for "
		if c.Name() == "" {
			usage += "this command"
		} else {
			usage += c.Name()
		}
		c.Flags().BoolP("help", "h", false, usage)
	}
}

// InitDefaultVersionFlag adds default version flag to c.
// It is called automatically by executing the c.
// If c already has a version flag, it will do nothing.
// If c.Version is empty, it will do nothing.
func (c *Command) InitDefaultVersionFlag() {
	if c.Version == "" {
		return
	}

	c.mergePersistentFlags()
	if c.Flags().Lookup("version") == nil {
		usage := "version for "
		if c.Name() == "" {
			usage += "this command"
		} else {
			usage += c.Name()
		}
		// Use the "v" shorthand only when the command hasn't claimed it.
		if c.Flags().ShorthandLookup("v") == nil {
			c.Flags().BoolP("version", "v", false, usage)
		} else {
			c.Flags().Bool("version", false, usage)
		}
	}
}

// InitDefaultHelpCmd adds default help command to c.
// It is called automatically by executing the c or by calling help and usage.
// If c already has help command or c has no subcommands, it will do nothing.
func (c *Command) InitDefaultHelpCmd() {
	if !c.HasSubCommands() {
		return
	}

	if c.helpCommand == nil {
		c.helpCommand = &Command{
			Use:   "help [command]",
			Short: "Help about any command",
			Long: `Help provides help for any command in the application.
Simply type ` + c.Name() + ` help [path to command] for full details.`,

			Run: func(c *Command, args []string) {
				cmd, _, e := c.Root().Find(args)
				if cmd == nil || e != nil {
					c.Printf("Unknown help topic %#q\n", args)
					c.Root().Usage()
				} else {
					cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
					cmd.Help()
				}
			},
		}
	}
	// Re-add so the help command is attached exactly once.
	c.RemoveCommand(c.helpCommand)
	c.AddCommand(c.helpCommand)
}

// ResetCommands delete parent, subcommand and help command from c.
func (c *Command) ResetCommands() {
	c.parent = nil
	c.commands = nil
	c.helpCommand = nil
	c.parentsPflags = nil
}

// Sorts commands by their names.
type commandSorterByName []*Command

func (c commandSorterByName) Len() int           { return len(c) }
func (c commandSorterByName) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }

// Commands returns a sorted slice of child commands.
-func (c *Command) Commands() []*Command { - // do not sort commands if it already sorted or sorting was disabled - if EnableCommandSorting && !c.commandsAreSorted { - sort.Sort(commandSorterByName(c.commands)) - c.commandsAreSorted = true - } - return c.commands -} - -// AddCommand adds one or more commands to this parent command. -func (c *Command) AddCommand(cmds ...*Command) { - for i, x := range cmds { - if cmds[i] == c { - panic("Command can't be a child of itself") - } - cmds[i].parent = c - // update max lengths - usageLen := len(x.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(x.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(x.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - // If global normalization function exists, update all children - if c.globNormFunc != nil { - x.SetGlobalNormalizationFunc(c.globNormFunc) - } - c.commands = append(c.commands, x) - c.commandsAreSorted = false - } -} - -// RemoveCommand removes one or more commands from a parent command. 
-func (c *Command) RemoveCommand(cmds ...*Command) { - commands := []*Command{} -main: - for _, command := range c.commands { - for _, cmd := range cmds { - if command == cmd { - command.parent = nil - continue main - } - } - commands = append(commands, command) - } - c.commands = commands - // recompute all lengths - c.commandsMaxUseLen = 0 - c.commandsMaxCommandPathLen = 0 - c.commandsMaxNameLen = 0 - for _, command := range c.commands { - usageLen := len(command.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(command.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(command.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - } -} - -// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. -func (c *Command) Print(i ...interface{}) { - fmt.Fprint(c.OutOrStderr(), i...) -} - -// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. -func (c *Command) Println(i ...interface{}) { - c.Print(fmt.Sprintln(i...)) -} - -// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. -func (c *Command) Printf(format string, i ...interface{}) { - c.Print(fmt.Sprintf(format, i...)) -} - -// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErr(i ...interface{}) { - fmt.Fprint(c.ErrOrStderr(), i...) -} - -// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErrln(i ...interface{}) { - c.Print(fmt.Sprintln(i...)) -} - -// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. 
-func (c *Command) PrintErrf(format string, i ...interface{}) { - c.Print(fmt.Sprintf(format, i...)) -} - -// CommandPath returns the full path to this command. -func (c *Command) CommandPath() string { - if c.HasParent() { - return c.Parent().CommandPath() + " " + c.Name() - } - return c.Name() -} - -// UseLine puts out the full usage for a given command (including parents). -func (c *Command) UseLine() string { - var useline string - if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use - } else { - useline = c.Use - } - if c.DisableFlagsInUseLine { - return useline - } - if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { - useline += " [flags]" - } - return useline -} - -// DebugFlags used to determine which flags have been assigned to which commands -// and which persist. -func (c *Command) DebugFlags() { - c.Println("DebugFlags called on", c.Name()) - var debugflags func(*Command) - - debugflags = func(x *Command) { - if x.HasFlags() || x.HasPersistentFlags() { - c.Println(x.Name()) - } - if x.HasFlags() { - x.flags.VisitAll(func(f *flag.Flag) { - if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } - }) - } - if x.HasPersistentFlags() { - x.pflags.VisitAll(func(f *flag.Flag) { - if x.HasFlags() { - if x.flags.Lookup(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - }) - } - c.Println(x.flagErrorBuf) - if x.HasSubCommands() { - for _, y := range x.commands { - debugflags(y) - } - } - } - - debugflags(c) -} - -// Name returns the command's name: the first word in the use line. 
-func (c *Command) Name() string { - name := c.Use - i := strings.Index(name, " ") - if i >= 0 { - name = name[:i] - } - return name -} - -// HasAlias determines if a given string is an alias of the command. -func (c *Command) HasAlias(s string) bool { - for _, a := range c.Aliases { - if a == s { - return true - } - } - return false -} - -// CalledAs returns the command name or alias that was used to invoke -// this command or an empty string if the command has not been called. -func (c *Command) CalledAs() string { - if c.commandCalledAs.called { - return c.commandCalledAs.name - } - return "" -} - -// hasNameOrAliasPrefix returns true if the Name or any of aliases start -// with prefix -func (c *Command) hasNameOrAliasPrefix(prefix string) bool { - if strings.HasPrefix(c.Name(), prefix) { - c.commandCalledAs.name = c.Name() - return true - } - for _, alias := range c.Aliases { - if strings.HasPrefix(alias, prefix) { - c.commandCalledAs.name = alias - return true - } - } - return false -} - -// NameAndAliases returns a list of the command name and all aliases -func (c *Command) NameAndAliases() string { - return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") -} - -// HasExample determines if the command has example. -func (c *Command) HasExample() bool { - return len(c.Example) > 0 -} - -// Runnable determines if the command is itself runnable. -func (c *Command) Runnable() bool { - return c.Run != nil || c.RunE != nil -} - -// HasSubCommands determines if the command has children commands. -func (c *Command) HasSubCommands() bool { - return len(c.commands) > 0 -} - -// IsAvailableCommand determines if a command is available as a non-help command -// (this includes all non deprecated/hidden commands). 
-func (c *Command) IsAvailableCommand() bool { - if len(c.Deprecated) != 0 || c.Hidden { - return false - } - - if c.HasParent() && c.Parent().helpCommand == c { - return false - } - - if c.Runnable() || c.HasAvailableSubCommands() { - return true - } - - return false -} - -// IsAdditionalHelpTopicCommand determines if a command is an additional -// help topic command; additional help topic command is determined by the -// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that -// are runnable/hidden/deprecated. -// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. -func (c *Command) IsAdditionalHelpTopicCommand() bool { - // if a command is runnable, deprecated, or hidden it is not a 'help' command - if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { - return false - } - - // if any non-help sub commands are found, the command is not a 'help' command - for _, sub := range c.commands { - if !sub.IsAdditionalHelpTopicCommand() { - return false - } - } - - // the command either has no sub commands, or no non-help sub commands - return true -} - -// HasHelpSubCommands determines if a command has any available 'help' sub commands -// that need to be shown in the usage/help default template under 'additional help -// topics'. -func (c *Command) HasHelpSubCommands() bool { - // return true on the first found available 'help' sub command - for _, sub := range c.commands { - if sub.IsAdditionalHelpTopicCommand() { - return true - } - } - - // the command either has no sub commands, or no available 'help' sub commands - return false -} - -// HasAvailableSubCommands determines if a command has available sub commands that -// need to be shown in the usage/help default template under 'available commands'. 
-func (c *Command) HasAvailableSubCommands() bool { - // return true on the first found available (non deprecated/help/hidden) - // sub command - for _, sub := range c.commands { - if sub.IsAvailableCommand() { - return true - } - } - - // the command either has no sub commands, or no available (non deprecated/help/hidden) - // sub commands - return false -} - -// HasParent determines if the command is a child command. -func (c *Command) HasParent() bool { - return c.parent != nil -} - -// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. -func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { - return c.globNormFunc -} - -// Flags returns the complete FlagSet that applies -// to this command (local and persistent declared here and by all parents). -func (c *Command) Flags() *flag.FlagSet { - if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.flags.SetOutput(c.flagErrorBuf) - } - - return c.flags -} - -// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. -func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { - persistentFlags := c.PersistentFlags() - - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.LocalFlags().VisitAll(func(f *flag.Flag) { - if persistentFlags.Lookup(f.Name) == nil { - out.AddFlag(f) - } - }) - return out -} - -// LocalFlags returns the local FlagSet specifically set in the current command. 
-func (c *Command) LocalFlags() *flag.FlagSet { - c.mergePersistentFlags() - - if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.lflags.SetOutput(c.flagErrorBuf) - } - c.lflags.SortFlags = c.Flags().SortFlags - if c.globNormFunc != nil { - c.lflags.SetNormalizeFunc(c.globNormFunc) - } - - addToLocal := func(f *flag.Flag) { - if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil { - c.lflags.AddFlag(f) - } - } - c.Flags().VisitAll(addToLocal) - c.PersistentFlags().VisitAll(addToLocal) - return c.lflags -} - -// InheritedFlags returns all flags which were inherited from parent commands. -func (c *Command) InheritedFlags() *flag.FlagSet { - c.mergePersistentFlags() - - if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.iflags.SetOutput(c.flagErrorBuf) - } - - local := c.LocalFlags() - if c.globNormFunc != nil { - c.iflags.SetNormalizeFunc(c.globNormFunc) - } - - c.parentsPflags.VisitAll(func(f *flag.Flag) { - if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { - c.iflags.AddFlag(f) - } - }) - return c.iflags -} - -// NonInheritedFlags returns all flags which were not inherited from parent commands. -func (c *Command) NonInheritedFlags() *flag.FlagSet { - return c.LocalFlags() -} - -// PersistentFlags returns the persistent FlagSet specifically set in the current command. -func (c *Command) PersistentFlags() *flag.FlagSet { - if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.pflags.SetOutput(c.flagErrorBuf) - } - return c.pflags -} - -// ResetFlags deletes all flags from command. 
-func (c *Command) ResetFlags() { - c.flagErrorBuf = new(bytes.Buffer) - c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.pflags.SetOutput(c.flagErrorBuf) - - c.lflags = nil - c.iflags = nil - c.parentsPflags = nil -} - -// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). -func (c *Command) HasFlags() bool { - return c.Flags().HasFlags() -} - -// HasPersistentFlags checks if the command contains persistent flags. -func (c *Command) HasPersistentFlags() bool { - return c.PersistentFlags().HasFlags() -} - -// HasLocalFlags checks if the command has flags specifically declared locally. -func (c *Command) HasLocalFlags() bool { - return c.LocalFlags().HasFlags() -} - -// HasInheritedFlags checks if the command has flags inherited from its parent command. -func (c *Command) HasInheritedFlags() bool { - return c.InheritedFlags().HasFlags() -} - -// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire -// structure) which are not hidden or deprecated. -func (c *Command) HasAvailableFlags() bool { - return c.Flags().HasAvailableFlags() -} - -// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. -func (c *Command) HasAvailablePersistentFlags() bool { - return c.PersistentFlags().HasAvailableFlags() -} - -// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden -// or deprecated. -func (c *Command) HasAvailableLocalFlags() bool { - return c.LocalFlags().HasAvailableFlags() -} - -// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are -// not hidden or deprecated. 
-func (c *Command) HasAvailableInheritedFlags() bool { - return c.InheritedFlags().HasAvailableFlags() -} - -// Flag climbs up the command tree looking for matching flag. -func (c *Command) Flag(name string) (flag *flag.Flag) { - flag = c.Flags().Lookup(name) - - if flag == nil { - flag = c.persistentFlag(name) - } - - return -} - -// Recursively find matching persistent flag. -func (c *Command) persistentFlag(name string) (flag *flag.Flag) { - if c.HasPersistentFlags() { - flag = c.PersistentFlags().Lookup(name) - } - - if flag == nil { - c.updateParentsPflags() - flag = c.parentsPflags.Lookup(name) - } - return -} - -// ParseFlags parses persistent flag tree and local flags. -func (c *Command) ParseFlags(args []string) error { - if c.DisableFlagParsing { - return nil - } - - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - beforeErrorBufLen := c.flagErrorBuf.Len() - c.mergePersistentFlags() - - // do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) - - err := c.Flags().Parse(args) - // Print warnings if they occurred (e.g. deprecated flag messages). - if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { - c.Print(c.flagErrorBuf.String()) - } - - return err -} - -// Parent returns a commands parent command. -func (c *Command) Parent() *Command { - return c.parent -} - -// mergePersistentFlags merges c.PersistentFlags() to c.Flags() -// and adds missing persistent flags of all parents. -func (c *Command) mergePersistentFlags() { - c.updateParentsPflags() - c.Flags().AddFlagSet(c.PersistentFlags()) - c.Flags().AddFlagSet(c.parentsPflags) -} - -// updateParentsPflags updates c.parentsPflags by adding -// new persistent flags of all parents. -// If c.parentsPflags == nil, it makes new. 
-func (c *Command) updateParentsPflags() { - if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.parentsPflags.SetOutput(c.flagErrorBuf) - c.parentsPflags.SortFlags = false - } - - if c.globNormFunc != nil { - c.parentsPflags.SetNormalizeFunc(c.globNormFunc) - } - - c.Root().PersistentFlags().AddFlagSet(flag.CommandLine) - - c.VisitParents(func(parent *Command) { - c.parentsPflags.AddFlagSet(parent.PersistentFlags()) - }) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/command_notwin.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/command_notwin.go deleted file mode 100644 index 6159c1cc19d..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/command_notwin.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows - -package cobra - -var preExecHookFn func(*Command) diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/command_win.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/command_win.go deleted file mode 100644 index 8768b1736dc..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/command_win.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build windows - -package cobra - -import ( - "fmt" - "os" - "time" - - "github.com/inconshreveable/mousetrap" -) - -var preExecHookFn = preExecHook - -func preExecHook(c *Command) { - if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { - c.Print(MousetrapHelpText) - if MousetrapDisplayDuration > 0 { - time.Sleep(MousetrapDisplayDuration) - } else { - c.Println("Press return to continue...") - fmt.Scanln() - } - os.Exit(1) - } -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/custom_completions.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/custom_completions.go deleted file mode 100644 index ba57327c153..00000000000 --- 
a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/custom_completions.go +++ /dev/null @@ -1,384 +0,0 @@ -package cobra - -import ( - "errors" - "fmt" - "os" - "strings" - - "github.com/spf13/pflag" -) - -const ( - // ShellCompRequestCmd is the name of the hidden command that is used to request - // completion results from the program. It is used by the shell completion scripts. - ShellCompRequestCmd = "__complete" - // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request - // completion results without their description. It is used by the shell completion scripts. - ShellCompNoDescRequestCmd = "__completeNoDesc" -) - -// Global map of flag completion functions. -var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} - -// ShellCompDirective is a bit map representing the different behaviors the shell -// can be instructed to have once completions have been provided. -type ShellCompDirective int - -const ( - // ShellCompDirectiveError indicates an error occurred and completions should be ignored. - ShellCompDirectiveError ShellCompDirective = 1 << iota - - // ShellCompDirectiveNoSpace indicates that the shell should not add a space - // after the completion even if there is a single completion provided. - ShellCompDirectiveNoSpace - - // ShellCompDirectiveNoFileComp indicates that the shell should not provide - // file completion even when no completion is provided. - // This currently does not work for zsh or bash < 4 - ShellCompDirectiveNoFileComp - - // ShellCompDirectiveDefault indicates to let the shell perform its default - // behavior after completions have been provided. - ShellCompDirectiveDefault ShellCompDirective = 0 -) - -// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. 
-func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { - flag := c.Flag(flagName) - if flag == nil { - return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) - } - if _, exists := flagCompletionFunctions[flag]; exists { - return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) - } - flagCompletionFunctions[flag] = f - return nil -} - -// Returns a string listing the different directive enabled in the specified parameter -func (d ShellCompDirective) string() string { - var directives []string - if d&ShellCompDirectiveError != 0 { - directives = append(directives, "ShellCompDirectiveError") - } - if d&ShellCompDirectiveNoSpace != 0 { - directives = append(directives, "ShellCompDirectiveNoSpace") - } - if d&ShellCompDirectiveNoFileComp != 0 { - directives = append(directives, "ShellCompDirectiveNoFileComp") - } - if len(directives) == 0 { - directives = append(directives, "ShellCompDirectiveDefault") - } - - if d > ShellCompDirectiveError+ShellCompDirectiveNoSpace+ShellCompDirectiveNoFileComp { - return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d) - } - return strings.Join(directives, ", ") -} - -// Adds a special hidden command that can be used to request custom completions. 
-func (c *Command) initCompleteCmd(args []string) { - completeCmd := &Command{ - Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), - Aliases: []string{ShellCompNoDescRequestCmd}, - DisableFlagsInUseLine: true, - Hidden: true, - DisableFlagParsing: true, - Args: MinimumNArgs(1), - Short: "Request shell completion choices for the specified command-line", - Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s", - "to request completion choices for the specified command-line.", ShellCompRequestCmd), - Run: func(cmd *Command, args []string) { - finalCmd, completions, directive, err := cmd.getCompletions(args) - if err != nil { - CompErrorln(err.Error()) - // Keep going for multiple reasons: - // 1- There could be some valid completions even though there was an error - // 2- Even without completions, we need to print the directive - } - - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) - for _, comp := range completions { - if noDescriptions { - // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] - } - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) - } - - if directive > ShellCompDirectiveError+ShellCompDirectiveNoSpace+ShellCompDirectiveNoFileComp { - directive = ShellCompDirectiveDefault - } - - // As the last printout, print the completion directive for the completion script to parse. - // The directive integer must be that last character following a single colon (:). - // The completion script expects : - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) - - // Print some helpful info to stderr for the user to understand. - // Output from stderr must be ignored by the completion script. 
- fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string()) - }, - } - c.AddCommand(completeCmd) - subCmd, _, err := c.Find(args) - if err != nil || subCmd.Name() != ShellCompRequestCmd { - // Only create this special command if it is actually being called. - // This reduces possible side-effects of creating such a command; - // for example, having this command would cause problems to a - // cobra program that only consists of the root command, since this - // command would cause the root command to suddenly have a subcommand. - c.RemoveCommand(completeCmd) - } -} - -func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { - var completions []string - - // The last argument, which is not completely typed by the user, - // should not be part of the list of arguments - toComplete := args[len(args)-1] - trimmedArgs := args[:len(args)-1] - - // Find the real command for which completion must be performed - finalCmd, finalArgs, err := c.Root().Find(trimmedArgs) - if err != nil { - // Unable to find the real command. E.g., someInvalidCmd - return c, completions, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) - } - - // When doing completion of a flag name, as soon as an argument starts with - // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires - // the flag to be complete - if len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") { - // We are completing a flag name - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - completions = append(completions, getFlagNameCompletions(flag, toComplete)...) - }) - finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - completions = append(completions, getFlagNameCompletions(flag, toComplete)...) 
- }) - - directive := ShellCompDirectiveDefault - if len(completions) > 0 { - if strings.HasSuffix(completions[0], "=") { - directive = ShellCompDirectiveNoSpace - } - } - return finalCmd, completions, directive, nil - } - - var flag *pflag.Flag - if !finalCmd.DisableFlagParsing { - // We only do flag completion if we are allowed to parse flags - // This is important for commands which have requested to do their own flag completion. - flag, finalArgs, toComplete, err = checkIfFlagCompletion(finalCmd, finalArgs, toComplete) - if err != nil { - // Error while attempting to parse flags - return finalCmd, completions, ShellCompDirectiveDefault, err - } - } - - if flag == nil { - // Complete subcommand names - for _, subCmd := range finalCmd.Commands() { - if subCmd.IsAvailableCommand() && strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) - } - } - - if len(finalCmd.ValidArgs) > 0 { - // Always complete ValidArgs, even if we are completing a subcommand name. - // This is for commands that have both subcommands and ValidArgs. - for _, validArg := range finalCmd.ValidArgs { - if strings.HasPrefix(validArg, toComplete) { - completions = append(completions, validArg) - } - } - - // If there are ValidArgs specified (even if they don't match), we stop completion. - // Only one of ValidArgs or ValidArgsFunction can be used for a single command. - return finalCmd, completions, ShellCompDirectiveNoFileComp, nil - } - - // Always let the logic continue so as to add any ValidArgsFunction completions, - // even if we already found sub-commands. - // This is for commands that have subcommands but also specify a ValidArgsFunction. 
- } - - // Parse the flags and extract the arguments to prepare for calling the completion function - if err = finalCmd.ParseFlags(finalArgs); err != nil { - return finalCmd, completions, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) - } - - // We only remove the flags from the arguments if DisableFlagParsing is not set. - // This is important for commands which have requested to do their own flag completion. - if !finalCmd.DisableFlagParsing { - finalArgs = finalCmd.Flags().Args() - } - - // Find the completion function for the flag or command - var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - if flag != nil { - completionFn = flagCompletionFunctions[flag] - } else { - completionFn = finalCmd.ValidArgsFunction - } - if completionFn == nil { - // Go custom completion not supported/needed for this flag or command - return finalCmd, completions, ShellCompDirectiveDefault, nil - } - - // Call the registered completion function to get the completions - comps, directive := completionFn(finalCmd, finalArgs, toComplete) - completions = append(completions, comps...) 
- return finalCmd, completions, directive, nil -} - -func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { - if nonCompletableFlag(flag) { - return []string{} - } - - var completions []string - flagName := "--" + flag.Name - if strings.HasPrefix(flagName, toComplete) { - // Flag without the = - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - - if len(flag.NoOptDefVal) == 0 { - // Flag requires a value, so it can be suffixed with = - flagName += "=" - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - } - } - - flagName = "-" + flag.Shorthand - if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - } - - return completions -} - -func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) { - var flagName string - trimmedArgs := args - flagWithEqual := false - if isFlagArg(lastArg) { - if index := strings.Index(lastArg, "="); index >= 0 { - flagName = strings.TrimLeft(lastArg[:index], "-") - lastArg = lastArg[index+1:] - flagWithEqual = true - } else { - return nil, nil, "", errors.New("Unexpected completion request for flag") - } - } - - if len(flagName) == 0 { - if len(args) > 0 { - prevArg := args[len(args)-1] - if isFlagArg(prevArg) { - // Only consider the case where the flag does not contain an =. - // If the flag contains an = it means it has already been fully processed, - // so we don't need to deal with it here. 
- if index := strings.Index(prevArg, "="); index < 0 { - flagName = strings.TrimLeft(prevArg, "-") - - // Remove the uncompleted flag or else there could be an error created - // for an invalid value for that flag - trimmedArgs = args[:len(args)-1] - } - } - } - } - - if len(flagName) == 0 { - // Not doing flag completion - return nil, trimmedArgs, lastArg, nil - } - - flag := findFlag(finalCmd, flagName) - if flag == nil { - // Flag not supported by this command, nothing to complete - err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName) - return nil, nil, "", err - } - - if !flagWithEqual { - if len(flag.NoOptDefVal) != 0 { - // We had assumed dealing with a two-word flag but the flag is a boolean flag. - // In that case, there is no value following it, so we are not really doing flag completion. - // Reset everything to do noun completion. - trimmedArgs = args - flag = nil - } - } - - return flag, trimmedArgs, lastArg, nil -} - -func findFlag(cmd *Command, name string) *pflag.Flag { - flagSet := cmd.Flags() - if len(name) == 1 { - // First convert the short flag into a long flag - // as the cmd.Flag() search only accepts long flags - if short := flagSet.ShorthandLookup(name); short != nil { - name = short.Name - } else { - set := cmd.InheritedFlags() - if short = set.ShorthandLookup(name); short != nil { - name = short.Name - } else { - return nil - } - } - } - return cmd.Flag(name) -} - -// CompDebug prints the specified string to the same file as where the -// completion script prints its logs. -// Note that completion printouts should never be on stdout as they would -// be wrongly interpreted as actual completion choices by the completion script. -func CompDebug(msg string, printToStdErr bool) { - msg = fmt.Sprintf("[Debug] %s", msg) - - // Such logs are only printed when the user has set the environment - // variable BASH_COMP_DEBUG_FILE to the path of some file to be used. 
- if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" { - f, err := os.OpenFile(path, - os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err == nil { - defer f.Close() - f.WriteString(msg) - } - } - - if printToStdErr { - // Must print to stderr for this not to be read by the completion script. - fmt.Fprintf(os.Stderr, msg) - } -} - -// CompDebugln prints the specified string with a newline at the end -// to the same file as where the completion script prints its logs. -// Such logs are only printed when the user has set the environment -// variable BASH_COMP_DEBUG_FILE to the path of some file to be used. -func CompDebugln(msg string, printToStdErr bool) { - CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr) -} - -// CompError prints the specified completion message to stderr. -func CompError(msg string) { - msg = fmt.Sprintf("[Error] %s", msg) - CompDebug(msg, true) -} - -// CompErrorln prints the specified completion message to stderr with a newline at the end. -func CompErrorln(msg string) { - CompError(fmt.Sprintf("%s\n", msg)) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/fish_completions.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/fish_completions.go deleted file mode 100644 index c83609c83b6..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/fish_completions.go +++ /dev/null @@ -1,172 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" -) - -func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) { - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - buf.WriteString(fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) - buf.WriteString(fmt.Sprintf(` -function __%[1]s_debug - set file "$BASH_COMP_DEBUG_FILE" - if test -n "$file" - echo "$argv" >> $file - end -end - -function __%[1]s_perform_completion - __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv" - - set args 
(string split -- " " "$argv") - set lastArg "$args[-1]" - - __%[1]s_debug "args: $args" - __%[1]s_debug "last arg: $lastArg" - - set emptyArg "" - if test -z "$lastArg" - __%[1]s_debug "Setting emptyArg" - set emptyArg \"\" - end - __%[1]s_debug "emptyArg: $emptyArg" - - set requestComp "$args[1] %[2]s $args[2..-1] $emptyArg" - __%[1]s_debug "Calling $requestComp" - - set results (eval $requestComp 2> /dev/null) - set comps $results[1..-2] - set directiveLine $results[-1] - - # For Fish, when completing a flag with an = (e.g., -n=) - # completions must be prefixed with the flag - set flagPrefix (string match -r -- '-.*=' "$lastArg") - - __%[1]s_debug "Comps: $comps" - __%[1]s_debug "DirectiveLine: $directiveLine" - __%[1]s_debug "flagPrefix: $flagPrefix" - - for comp in $comps - printf "%%s%%s\n" "$flagPrefix" "$comp" - end - - printf "%%s\n" "$directiveLine" -end - -# This function does three things: -# 1- Obtain the completions and store them in the global __%[1]s_comp_results -# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed -# and unset it otherwise -# 3- Return true if the completion results are not empty -function __%[1]s_prepare_completions - # Start fresh - set --erase __%[1]s_comp_do_file_comp - set --erase __%[1]s_comp_results - - # Check if the command-line is already provided. This is useful for testing. 
- if not set --query __%[1]s_comp_commandLine - set __%[1]s_comp_commandLine (commandline) - end - __%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine" - - set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine") - set --erase __%[1]s_comp_commandLine - __%[1]s_debug "Completion results: $results" - - if test -z "$results" - __%[1]s_debug "No completion, probably due to a failure" - # Might as well do file completion, in case it helps - set --global __%[1]s_comp_do_file_comp 1 - return 0 - end - - set directive (string sub --start 2 $results[-1]) - set --global __%[1]s_comp_results $results[1..-2] - - __%[1]s_debug "Completions are: $__%[1]s_comp_results" - __%[1]s_debug "Directive is: $directive" - - if test -z "$directive" - set directive 0 - end - - set compErr (math (math --scale 0 $directive / %[3]d) %% 2) - if test $compErr -eq 1 - __%[1]s_debug "Received error directive: aborting." - # Might as well do file completion, in case it helps - set --global __%[1]s_comp_do_file_comp 1 - return 0 - end - - set nospace (math (math --scale 0 $directive / %[4]d) %% 2) - set nofiles (math (math --scale 0 $directive / %[5]d) %% 2) - - __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" - - # Important not to quote the variable for count to work - set numComps (count $__%[1]s_comp_results) - __%[1]s_debug "numComps: $numComps" - - if test $numComps -eq 1; and test $nospace -ne 0 - # To support the "nospace" directive we trick the shell - # by outputting an extra, longer completion. - __%[1]s_debug "Adding second completion to perform nospace directive" - set --append __%[1]s_comp_results $__%[1]s_comp_results[1]. - end - - if test $numComps -eq 0; and test $nofiles -eq 0 - __%[1]s_debug "Requesting file completion" - set --global __%[1]s_comp_do_file_comp 1 - end - - # If we don't want file completion, we must return true even if there - # are no completions found. 
This is because fish will perform the last - # completion command, even if its condition is false, if no other - # completion command was triggered - return (not set --query __%[1]s_comp_do_file_comp) -end - -# Remove any pre-existing completions for the program since we will be handling all of them -# TODO this cleanup is not sufficient. Fish completions are only loaded once the user triggers -# them, so the below deletion will not work as it is run too early. What else can we do? -complete -c %[1]s -e - -# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions -# is called first. It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable. -# -# This completion will be run second as complete commands are added FILO. -# It triggers file completion choices when __%[1]s_comp_do_file_comp is set. -complete -c %[1]s -n 'set --query __%[1]s_comp_do_file_comp' - -# This completion will be run first as complete commands are added FILO. -# The call to __%[1]s_prepare_completions will setup both __%[1]s_comp_results abd __%[1]s_comp_do_file_comp. -# It provides the program's completion choices. -complete -c %[1]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' - -`, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp)) -} - -// GenFishCompletion generates fish completion file and writes to the passed writer. -func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genFishComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -// GenFishCompletionFile generates fish completion file. 
-func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenFishCompletion(outFile, includeDesc) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/fish_completions.md b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/fish_completions.md deleted file mode 100644 index 6bfe5f88ef2..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/fish_completions.md +++ /dev/null @@ -1,7 +0,0 @@ -## Generating Fish Completions for your own cobra.Command - -Cobra supports native Fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. - -### Limitations - -* Custom completions implemented using the `ValidArgsFunction` and `RegisterFlagCompletionFunc()` are supported automatically but the ones implemented in Bash scripting are not. 
diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/go.mod b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/go.mod deleted file mode 100644 index dea1030ba43..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/spf13/cobra - -go 1.12 - -require ( - github.com/cpuguy83/go-md2man/v2 v2.0.0 - github.com/inconshreveable/mousetrap v1.0.0 - github.com/mitchellh/go-homedir v1.1.0 - github.com/spf13/pflag v1.0.3 - github.com/spf13/viper v1.4.0 - gopkg.in/yaml.v2 v2.2.2 -) diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/go.sum b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/go.sum deleted file mode 100644 index 3aaa2ac0fd6..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/go.sum +++ /dev/null @@ -1,149 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod 
h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/powershell_completions.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/powershell_completions.go deleted file mode 100644 index 756c61b9dcb..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/powershell_completions.go +++ /dev/null @@ -1,100 +0,0 @@ -// PowerShell completions are based on the amazing work from clap: -// 
https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs -// -// The generated scripts require PowerShell v5.0+ (which comes Windows 10, but -// can be downloaded separately for windows 7 or 8.1). - -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "strings" - - "github.com/spf13/pflag" -) - -var powerShellCompletionTemplate = `using namespace System.Management.Automation -using namespace System.Management.Automation.Language -Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock { - param($wordToComplete, $commandAst, $cursorPosition) - $commandElements = $commandAst.CommandElements - $command = @( - '%s' - for ($i = 1; $i -lt $commandElements.Count; $i++) { - $element = $commandElements[$i] - if ($element -isnot [StringConstantExpressionAst] -or - $element.StringConstantType -ne [StringConstantType]::BareWord -or - $element.Value.StartsWith('-')) { - break - } - $element.Value - } - ) -join ';' - $completions = @(switch ($command) {%s - }) - $completions.Where{ $_.CompletionText -like "$wordToComplete*" } | - Sort-Object -Property ListItemText -}` - -func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) { - var cmdName string - if previousCommandName == "" { - cmdName = cmd.Name() - } else { - cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name()) - } - - fmt.Fprintf(out, "\n '%s' {", cmdName) - - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - usage := escapeStringForPowerShell(flag.Usage) - if len(flag.Shorthand) > 0 { - fmt.Fprintf(out, "\n [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage) - } - fmt.Fprintf(out, "\n [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Name, flag.Name, usage) - }) - - for _, subCmd := range cmd.Commands() { - usage := 
escapeStringForPowerShell(subCmd.Short) - fmt.Fprintf(out, "\n [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage) - } - - fmt.Fprint(out, "\n break\n }") - - for _, subCmd := range cmd.Commands() { - generatePowerShellSubcommandCases(out, subCmd, cmdName) - } -} - -func escapeStringForPowerShell(s string) string { - return strings.Replace(s, "'", "''", -1) -} - -// GenPowerShellCompletion generates PowerShell completion file and writes to the passed writer. -func (c *Command) GenPowerShellCompletion(w io.Writer) error { - buf := new(bytes.Buffer) - - var subCommandCases bytes.Buffer - generatePowerShellSubcommandCases(&subCommandCases, c, "") - fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String()) - - _, err := buf.WriteTo(w) - return err -} - -// GenPowerShellCompletionFile generates PowerShell completion file. -func (c *Command) GenPowerShellCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenPowerShellCompletion(outFile) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/powershell_completions.md b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/powershell_completions.md deleted file mode 100644 index afed8024087..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/powershell_completions.md +++ /dev/null @@ -1,14 +0,0 @@ -# Generating PowerShell Completions For Your Own cobra.Command - -Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. 
- -# What's supported - -- Completion for subcommands using their `.Short` description -- Completion for non-hidden flags using their `.Name` and `.Shorthand` - -# What's not yet supported - -- Command aliases -- Required, filename or custom flags (they will work like normal flags) -- Custom completion scripts diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/shell_completions.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/shell_completions.go deleted file mode 100644 index ba0af9cb553..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/shell_completions.go +++ /dev/null @@ -1,85 +0,0 @@ -package cobra - -import ( - "github.com/spf13/pflag" -) - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(c.Flags(), name) -} - -// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(c.PersistentFlags(), name) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, -// and causes your command to report an error if invoked without the flag. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (c *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.Flags(), name, extensions...) 
-} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func (c *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(c.Flags(), name, f) -} - -// MarkPersistentFlagFilename instructs the various shell completion -// implementations to limit completions for this persistent flag to the -// specified extensions (patterns). -// -// Shell Completion compatibility matrix: bash, zsh -func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.PersistentFlags(), name, extensions...) -} - -// MarkFlagFilename instructs the various shell completion implementations to -// limit completions for this flag to the specified extensions (patterns). -// -// Shell Completion compatibility matrix: bash, zsh -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} - -// MarkFlagCustom instructs the various shell completion implementations to -// limit completions for this flag to the specified extensions (patterns). -// -// Shell Completion compatibility matrix: bash, zsh -func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { - return flags.SetAnnotation(name, BashCompCustom, []string{f}) -} - -// MarkFlagDirname instructs the various shell completion implementations to -// complete only directories with this named flag. -// -// Shell Completion compatibility matrix: zsh -func (c *Command) MarkFlagDirname(name string) error { - return MarkFlagDirname(c.Flags(), name) -} - -// MarkPersistentFlagDirname instructs the various shell completion -// implementations to complete only directories with this persistent named flag. 
-// -// Shell Completion compatibility matrix: zsh -func (c *Command) MarkPersistentFlagDirname(name string) error { - return MarkFlagDirname(c.PersistentFlags(), name) -} - -// MarkFlagDirname instructs the various shell completion implementations to -// complete only directories with this specified flag. -// -// Shell Completion compatibility matrix: zsh -func MarkFlagDirname(flags *pflag.FlagSet, name string) error { - zshPattern := "-(/)" - return flags.SetAnnotation(name, zshCompDirname, []string{zshPattern}) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/zsh_completions.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/zsh_completions.go deleted file mode 100644 index 12755482f0c..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/zsh_completions.go +++ /dev/null @@ -1,336 +0,0 @@ -package cobra - -import ( - "encoding/json" - "fmt" - "io" - "os" - "sort" - "strings" - "text/template" - - "github.com/spf13/pflag" -) - -const ( - zshCompArgumentAnnotation = "cobra_annotations_zsh_completion_argument_annotation" - zshCompArgumentFilenameComp = "cobra_annotations_zsh_completion_argument_file_completion" - zshCompArgumentWordComp = "cobra_annotations_zsh_completion_argument_word_completion" - zshCompDirname = "cobra_annotations_zsh_dirname" -) - -var ( - zshCompFuncMap = template.FuncMap{ - "genZshFuncName": zshCompGenFuncName, - "extractFlags": zshCompExtractFlag, - "genFlagEntryForZshArguments": zshCompGenFlagEntryForArguments, - "extractArgsCompletions": zshCompExtractArgumentCompletionHintsForRendering, - } - zshCompletionText = ` -{{/* should accept Command (that contains subcommands) as parameter */}} -{{define "argumentsC" -}} -{{ $cmdPath := genZshFuncName .}} -function {{$cmdPath}} { - local -a commands - - _arguments -C \{{- range extractFlags .}} - {{genFlagEntryForZshArguments .}} \{{- end}} - "1: :->cmnds" \ - "*::arg:->args" - - case $state in - cmnds) - commands=({{range 
.Commands}}{{if not .Hidden}} - "{{.Name}}:{{.Short}}"{{end}}{{end}} - ) - _describe "command" commands - ;; - esac - - case "$words[1]" in {{- range .Commands}}{{if not .Hidden}} - {{.Name}}) - {{$cmdPath}}_{{.Name}} - ;;{{end}}{{end}} - esac -} -{{range .Commands}}{{if not .Hidden}} -{{template "selectCmdTemplate" .}} -{{- end}}{{end}} -{{- end}} - -{{/* should accept Command without subcommands as parameter */}} -{{define "arguments" -}} -function {{genZshFuncName .}} { -{{" _arguments"}}{{range extractFlags .}} \ - {{genFlagEntryForZshArguments . -}} -{{end}}{{range extractArgsCompletions .}} \ - {{.}}{{end}} -} -{{end}} - -{{/* dispatcher for commands with or without subcommands */}} -{{define "selectCmdTemplate" -}} -{{if .Hidden}}{{/* ignore hidden*/}}{{else -}} -{{if .Commands}}{{template "argumentsC" .}}{{else}}{{template "arguments" .}}{{end}} -{{- end}} -{{- end}} - -{{/* template entry point */}} -{{define "Main" -}} -#compdef _{{.Name}} {{.Name}} - -{{template "selectCmdTemplate" .}} -{{end}} -` -) - -// zshCompArgsAnnotation is used to encode/decode zsh completion for -// arguments to/from Command.Annotations. -type zshCompArgsAnnotation map[int]zshCompArgHint - -type zshCompArgHint struct { - // Indicates the type of the completion to use. One of: - // zshCompArgumentFilenameComp or zshCompArgumentWordComp - Tipe string `json:"type"` - - // A value for the type above (globs for file completion or words) - Options []string `json:"options"` -} - -// GenZshCompletionFile generates zsh completion file. -func (c *Command) GenZshCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenZshCompletion(outFile) -} - -// GenZshCompletion generates a zsh completion file and writes to the passed -// writer. The completion always run on the root command regardless of the -// command it was called from. 
-func (c *Command) GenZshCompletion(w io.Writer) error { - tmpl, err := template.New("Main").Funcs(zshCompFuncMap).Parse(zshCompletionText) - if err != nil { - return fmt.Errorf("error creating zsh completion template: %v", err) - } - return tmpl.Execute(w, c.Root()) -} - -// MarkZshCompPositionalArgumentFile marks the specified argument (first -// argument is 1) as completed by file selection. patterns (e.g. "*.txt") are -// optional - if not provided the completion will search for all files. -func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { - if argPosition < 1 { - return fmt.Errorf("Invalid argument position (%d)", argPosition) - } - annotation, err := c.zshCompGetArgsAnnotations() - if err != nil { - return err - } - if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) { - return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition) - } - annotation[argPosition] = zshCompArgHint{ - Tipe: zshCompArgumentFilenameComp, - Options: patterns, - } - return c.zshCompSetArgsAnnotations(annotation) -} - -// MarkZshCompPositionalArgumentWords marks the specified positional argument -// (first argument is 1) as completed by the provided words. At east one word -// must be provided, spaces within words will be offered completion with -// "word\ word". 
-func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { - if argPosition < 1 { - return fmt.Errorf("Invalid argument position (%d)", argPosition) - } - if len(words) == 0 { - return fmt.Errorf("Trying to set empty word list for positional argument %d", argPosition) - } - annotation, err := c.zshCompGetArgsAnnotations() - if err != nil { - return err - } - if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) { - return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition) - } - annotation[argPosition] = zshCompArgHint{ - Tipe: zshCompArgumentWordComp, - Options: words, - } - return c.zshCompSetArgsAnnotations(annotation) -} - -func zshCompExtractArgumentCompletionHintsForRendering(c *Command) ([]string, error) { - var result []string - annotation, err := c.zshCompGetArgsAnnotations() - if err != nil { - return nil, err - } - for k, v := range annotation { - s, err := zshCompRenderZshCompArgHint(k, v) - if err != nil { - return nil, err - } - result = append(result, s) - } - if len(c.ValidArgs) > 0 { - if _, positionOneExists := annotation[1]; !positionOneExists { - s, err := zshCompRenderZshCompArgHint(1, zshCompArgHint{ - Tipe: zshCompArgumentWordComp, - Options: c.ValidArgs, - }) - if err != nil { - return nil, err - } - result = append(result, s) - } - } - sort.Strings(result) - return result, nil -} - -func zshCompRenderZshCompArgHint(i int, z zshCompArgHint) (string, error) { - switch t := z.Tipe; t { - case zshCompArgumentFilenameComp: - var globs []string - for _, g := range z.Options { - globs = append(globs, fmt.Sprintf(`-g "%s"`, g)) - } - return fmt.Sprintf(`'%d: :_files %s'`, i, strings.Join(globs, " ")), nil - case zshCompArgumentWordComp: - var words []string - for _, w := range z.Options { - words = append(words, fmt.Sprintf("%q", w)) - } - return fmt.Sprintf(`'%d: :(%s)'`, i, strings.Join(words, " ")), nil - default: - return "", fmt.Errorf("Invalid zsh argument 
completion annotation: %s", t) - } -} - -func (c *Command) zshcompArgsAnnotationnIsDuplicatePosition(annotation zshCompArgsAnnotation, position int) bool { - _, dup := annotation[position] - return dup -} - -func (c *Command) zshCompGetArgsAnnotations() (zshCompArgsAnnotation, error) { - annotation := make(zshCompArgsAnnotation) - annotationString, ok := c.Annotations[zshCompArgumentAnnotation] - if !ok { - return annotation, nil - } - err := json.Unmarshal([]byte(annotationString), &annotation) - if err != nil { - return annotation, fmt.Errorf("Error unmarshaling zsh argument annotation: %v", err) - } - return annotation, nil -} - -func (c *Command) zshCompSetArgsAnnotations(annotation zshCompArgsAnnotation) error { - jsn, err := json.Marshal(annotation) - if err != nil { - return fmt.Errorf("Error marshaling zsh argument annotation: %v", err) - } - if c.Annotations == nil { - c.Annotations = make(map[string]string) - } - c.Annotations[zshCompArgumentAnnotation] = string(jsn) - return nil -} - -func zshCompGenFuncName(c *Command) string { - if c.HasParent() { - return zshCompGenFuncName(c.Parent()) + "_" + c.Name() - } - return "_" + c.Name() -} - -func zshCompExtractFlag(c *Command) []*pflag.Flag { - var flags []*pflag.Flag - c.LocalFlags().VisitAll(func(f *pflag.Flag) { - if !f.Hidden { - flags = append(flags, f) - } - }) - c.InheritedFlags().VisitAll(func(f *pflag.Flag) { - if !f.Hidden { - flags = append(flags, f) - } - }) - return flags -} - -// zshCompGenFlagEntryForArguments returns an entry that matches _arguments -// zsh-completion parameters. It's too complicated to generate in a template. 
-func zshCompGenFlagEntryForArguments(f *pflag.Flag) string { - if f.Name == "" || f.Shorthand == "" { - return zshCompGenFlagEntryForSingleOptionFlag(f) - } - return zshCompGenFlagEntryForMultiOptionFlag(f) -} - -func zshCompGenFlagEntryForSingleOptionFlag(f *pflag.Flag) string { - var option, multiMark, extras string - - if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) { - multiMark = "*" - } - - option = "--" + f.Name - if option == "--" { - option = "-" + f.Shorthand - } - extras = zshCompGenFlagEntryExtras(f) - - return fmt.Sprintf(`'%s%s[%s]%s'`, multiMark, option, zshCompQuoteFlagDescription(f.Usage), extras) -} - -func zshCompGenFlagEntryForMultiOptionFlag(f *pflag.Flag) string { - var options, parenMultiMark, curlyMultiMark, extras string - - if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) { - parenMultiMark = "*" - curlyMultiMark = "\\*" - } - - options = fmt.Sprintf(`'(%s-%s %s--%s)'{%s-%s,%s--%s}`, - parenMultiMark, f.Shorthand, parenMultiMark, f.Name, curlyMultiMark, f.Shorthand, curlyMultiMark, f.Name) - extras = zshCompGenFlagEntryExtras(f) - - return fmt.Sprintf(`%s'[%s]%s'`, options, zshCompQuoteFlagDescription(f.Usage), extras) -} - -func zshCompGenFlagEntryExtras(f *pflag.Flag) string { - if f.NoOptDefVal != "" { - return "" - } - - extras := ":" // allow options for flag (even without assistance) - for key, values := range f.Annotations { - switch key { - case zshCompDirname: - extras = fmt.Sprintf(":filename:_files -g %q", values[0]) - case BashCompFilenameExt: - extras = ":filename:_files" - for _, pattern := range values { - extras = extras + fmt.Sprintf(` -g "%s"`, pattern) - } - } - } - - return extras -} - -func zshCompFlagCouldBeSpecifiedMoreThenOnce(f *pflag.Flag) bool { - return strings.Contains(f.Value.Type(), "Slice") || - strings.Contains(f.Value.Type(), "Array") -} - -func zshCompQuoteFlagDescription(s string) string { - return strings.Replace(s, "'", `'\''`, -1) -} diff --git 
a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/zsh_completions.md b/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/zsh_completions.md deleted file mode 100644 index df9c2eac93c..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/cobra/zsh_completions.md +++ /dev/null @@ -1,39 +0,0 @@ -## Generating Zsh Completion for your cobra.Command - -Cobra supports native Zsh completion generated from the root `cobra.Command`. -The generated completion script should be put somewhere in your `$fpath` named -`_`. - -### What's Supported - -* Completion for all non-hidden subcommands using their `.Short` description. -* Completion for all non-hidden flags using the following rules: - * Filename completion works by marking the flag with `cmd.MarkFlagFilename...` - family of commands. - * The requirement for argument to the flag is decided by the `.NoOptDefVal` - flag value - if it's empty then completion will expect an argument. - * Flags of one of the various `*Array` and `*Slice` types supports multiple - specifications (with or without argument depending on the specific type). -* Completion of positional arguments using the following rules: - * Argument position for all options below starts at `1`. If argument position - `0` is requested it will raise an error. - * Use `command.MarkZshCompPositionalArgumentFile` to complete filenames. Glob - patterns (e.g. `"*.log"`) are optional - if not specified it will offer to - complete all file types. - * Use `command.MarkZshCompPositionalArgumentWords` to offer specific words for - completion. At least one word is required. - * It's possible to specify completion for some arguments and leave some - unspecified (e.g. offer words for second argument but nothing for first - argument). This will cause no completion for first argument but words - completion for second argument. 
- * If no argument completion was specified for 1st argument (but optionally was - specified for 2nd) and the command has `ValidArgs` it will be used as - completion options for 1st argument. - * Argument completions only offered for commands with no subcommands. - -### What's not yet Supported - -* Custom completion scripts are not supported yet (We should probably create zsh - specific one, doesn't make sense to re-use the bash one as the functions will - be different). -* Whatever other feature you're looking for and doesn't exist :) diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/.gitignore b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/.gitignore deleted file mode 100644 index c3da2901346..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.idea/* - diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/.travis.yml b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index f8a63b308ba..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false - -language: go - -go: - - 1.7.3 - - 1.8.1 - - tip - -matrix: - allow_failures: - - go: tip - -install: - - go get github.com/golang/lint/golint - - export PATH=$GOPATH/bin:$PATH - - go install ./... - -script: - - verify/all.sh -v - - go test ./... diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/LICENSE b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/LICENSE deleted file mode 100644 index 63ed1cfea1f..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/README.md b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index b052414d129..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,296 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) -[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/spf13/pflag - -Run tests by running: - - go test github.com/spf13/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/spf13/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. 
- -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. - -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. -If you have a pflag.FlagSet with a flag called 'flagname' of type int you -can use GetInt() to get the int value. But notice that 'flagname' must exist -and it must be an int. GetString("flagname") will fail. - -``` go -i, err := flagset.GetInt("flagname") -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. 
- -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Setting no option default values for flags - -After you create a flag it is possible to set the pflag.NoOptDefVal for -the given flag. Doing this changes the meaning of the flag slightly. If -a flag has a NoOptDefVal and the flag is set on the command line without -an option the flag will be set to the NoOptDefVal. For example given: - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -flag.Lookup("flagname").NoOptDefVal = "4321" -``` - -Would result in something like - -| Parsed Arguments | Resulting Value | -| ------------- | ------------- | -| --flagname=1357 | ip=1357 | -| --flagname | ip=4321 | -| [nothing] | ip=1234 | - -## Command line flag syntax - -``` ---flag // boolean flags, or flags with no option default values ---flag x // only on flags without a default value ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. 
All but the last shorthand letter must be boolean flags -or a flag with a default value - -``` -// boolean or flags where the 'no option default value' is set --f --f=true --abc -but --b true is INVALID - -// non-boolean and flags without a 'no option default value' --n 1234 --n=1234 --n1234 - -// mixed --abcs "hello" --absd="hello" --abcs1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -``` go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name - -``` go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## Deprecating a flag or its shorthand -It is possible to deprecate a flag, or just its shorthand. 
Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. - -**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. -```go -// deprecate a flag by specifying its name and a usage message -flags.MarkDeprecated("badflag", "please use --good-flag instead") -``` -This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. - -**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". -```go -// deprecate a flag shorthand by specifying its flag name and a usage message -flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") -``` -This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. - -Note that usage message is essential here, and it should not be empty. - -## Hidden flags -It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. - -**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. -```go -// hide a flag by specifying its name -flags.MarkHidden("secretFlag") -``` - -## Disable sorting of flags -`pflag` allows you to disable sorting of flags for help and usage message. 
- -**Example**: -```go -flags.BoolP("verbose", "v", false, "verbose output") -flags.String("coolflag", "yeaah", "it's really cool flag") -flags.Int("usefulflag", 777, "sometimes it's very useful") -flags.SortFlags = false -flags.PrintDefaults() -``` -**Output**: -``` - -v, --verbose verbose output - --coolflag string it's really cool flag (default "yeaah") - --usefulflag int sometimes it's very useful (default 777) -``` - - -## Supporting Go flags when using pflag -In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary -to support flags defined by third-party dependencies (e.g. `golang/glog`). - -**Example**: You want to add the Go flags to the `CommandLine` flagset -```go -import ( - goflag "flag" - flag "github.com/spf13/pflag" -) - -var ip *int = flag.Int("flagname", 1234, "help message for flagname") - -func main() { - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) - flag.Parse() -} -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/spf13/pflag][2] after -installation. 
- -[2]: http://localhost:6060/pkg/github.com/spf13/pflag -[3]: http://godoc.org/github.com/spf13/pflag diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/bool.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/bool.go deleted file mode 100644 index c4c5c0bfda0..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/bool.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import "strconv" - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Type() string { - return "bool" -} - -func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -func boolConv(sval string) (interface{}, error) { - return strconv.ParseBool(sval) -} - -// GetBool return the bool value of a flag with the given name -func (f *FlagSet) GetBool(name string) (bool, error) { - val, err := f.getFlagType(name, "bool", boolConv) - if err != nil { - return false, err - } - return val.(bool), nil -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { - f.BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, name string, value bool, usage string) { - BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(name string, value bool, usage string) *bool { - return f.BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, shorthand, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(name string, value bool, usage string) *bool { - return BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. 
-func BoolP(name, shorthand string, value bool, usage string) *bool { - b := CommandLine.BoolP(name, shorthand, value, usage) - return b -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/bool_slice.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/bool_slice.go deleted file mode 100644 index 5af02f1a75a..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/bool_slice.go +++ /dev/null @@ -1,147 +0,0 @@ -package pflag - -import ( - "io" - "strconv" - "strings" -) - -// -- boolSlice Value -type boolSliceValue struct { - value *[]bool - changed bool -} - -func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { - bsv := new(boolSliceValue) - bsv.value = p - *bsv.value = val - return bsv -} - -// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. -// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. -func (s *boolSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse boolean values into slice - out := make([]bool, 0, len(boolStrSlice)) - for _, boolStr := range boolStrSlice { - b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) - if err != nil { - return err - } - out = append(out, b) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *boolSliceValue) Type() string { - return "boolSlice" -} - -// String defines a "native" format for this boolean slice flag value. 
-func (s *boolSliceValue) String() string { - - boolStrSlice := make([]string, len(*s.value)) - for i, b := range *s.value { - boolStrSlice[i] = strconv.FormatBool(b) - } - - out, _ := writeAsCSV(boolStrSlice) - - return "[" + out + "]" -} - -func boolSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []bool{}, nil - } - ss := strings.Split(val, ",") - out := make([]bool, len(ss)) - for i, t := range ss { - var err error - out[i], err = strconv.ParseBool(t) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetBoolSlice returns the []bool value of a flag with the given name. -func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { - val, err := f.getFlagType(name, "boolSlice", boolSliceConv) - if err != nil { - return []bool{}, err - } - return val.([]bool), nil -} - -// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. 
-func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, "", value, usage) - return &p -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func BoolSlice(name string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, "", value, usage) -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/bytes.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/bytes.go deleted file mode 100644 index 67d53045708..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/bytes.go +++ /dev/null @@ -1,209 +0,0 @@ -package pflag - -import ( - "encoding/base64" - "encoding/hex" - "fmt" - "strings" -) - -// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded -type bytesHexValue []byte - -// String implements pflag.Value.String. 
-func (bytesHex bytesHexValue) String() string { - return fmt.Sprintf("%X", []byte(bytesHex)) -} - -// Set implements pflag.Value.Set. -func (bytesHex *bytesHexValue) Set(value string) error { - bin, err := hex.DecodeString(strings.TrimSpace(value)) - - if err != nil { - return err - } - - *bytesHex = bin - - return nil -} - -// Type implements pflag.Value.Type. -func (*bytesHexValue) Type() string { - return "bytesHex" -} - -func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue { - *p = val - return (*bytesHexValue)(p) -} - -func bytesHexConv(sval string) (interface{}, error) { - - bin, err := hex.DecodeString(sval) - - if err == nil { - return bin, nil - } - - return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) -} - -// GetBytesHex return the []byte value of a flag with the given name -func (f *FlagSet) GetBytesHex(name string) ([]byte, error) { - val, err := f.getFlagType(name, "bytesHex", bytesHexConv) - - if err != nil { - return []byte{}, err - } - - return val.([]byte), nil -} - -// BytesHexVar defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) { - f.VarP(newBytesHexValue(value, p), name, "", usage) -} - -// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { - f.VarP(newBytesHexValue(value, p), name, shorthand, usage) -} - -// BytesHexVar defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. 
-func BytesHexVar(p *[]byte, name string, value []byte, usage string) { - CommandLine.VarP(newBytesHexValue(value, p), name, "", usage) -} - -// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. -func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { - CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage) -} - -// BytesHex defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesHexVarP(p, name, "", value, usage) - return p -} - -// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesHexVarP(p, name, shorthand, value, usage) - return p -} - -// BytesHex defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func BytesHex(name string, value []byte, usage string) *[]byte { - return CommandLine.BytesHexP(name, "", value, usage) -} - -// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. -func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { - return CommandLine.BytesHexP(name, shorthand, value, usage) -} - -// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded -type bytesBase64Value []byte - -// String implements pflag.Value.String. -func (bytesBase64 bytesBase64Value) String() string { - return base64.StdEncoding.EncodeToString([]byte(bytesBase64)) -} - -// Set implements pflag.Value.Set. 
-func (bytesBase64 *bytesBase64Value) Set(value string) error { - bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value)) - - if err != nil { - return err - } - - *bytesBase64 = bin - - return nil -} - -// Type implements pflag.Value.Type. -func (*bytesBase64Value) Type() string { - return "bytesBase64" -} - -func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value { - *p = val - return (*bytesBase64Value)(p) -} - -func bytesBase64ValueConv(sval string) (interface{}, error) { - - bin, err := base64.StdEncoding.DecodeString(sval) - if err == nil { - return bin, nil - } - - return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) -} - -// GetBytesBase64 return the []byte value of a flag with the given name -func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) { - val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv) - - if err != nil { - return []byte{}, err - } - - return val.([]byte), nil -} - -// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) { - f.VarP(newBytesBase64Value(value, p), name, "", usage) -} - -// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { - f.VarP(newBytesBase64Value(value, p), name, shorthand, usage) -} - -// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. 
-func BytesBase64Var(p *[]byte, name string, value []byte, usage string) { - CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage) -} - -// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. -func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { - CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage) -} - -// BytesBase64 defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesBase64VarP(p, name, "", value, usage) - return p -} - -// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesBase64VarP(p, name, shorthand, value, usage) - return p -} - -// BytesBase64 defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func BytesBase64(name string, value []byte, usage string) *[]byte { - return CommandLine.BytesBase64P(name, "", value, usage) -} - -// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. 
-func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { - return CommandLine.BytesBase64P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/count.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/count.go deleted file mode 100644 index aa126e44d1c..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/count.go +++ /dev/null @@ -1,96 +0,0 @@ -package pflag - -import "strconv" - -// -- count Value -type countValue int - -func newCountValue(val int, p *int) *countValue { - *p = val - return (*countValue)(p) -} - -func (i *countValue) Set(s string) error { - // "+1" means that no specific value was passed, so increment - if s == "+1" { - *i = countValue(*i + 1) - return nil - } - v, err := strconv.ParseInt(s, 0, 0) - *i = countValue(v) - return err -} - -func (i *countValue) Type() string { - return "count" -} - -func (i *countValue) String() string { return strconv.Itoa(int(*i)) } - -func countConv(sval string) (interface{}, error) { - i, err := strconv.Atoi(sval) - if err != nil { - return nil, err - } - return i, nil -} - -// GetCount return the int value of a flag with the given name -func (f *FlagSet) GetCount(name string) (int, error) { - val, err := f.getFlagType(name, "count", countConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// CountVar defines a count flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line -func (f *FlagSet) CountVar(p *int, name string, usage string) { - f.CountVarP(p, name, "", usage) -} - -// CountVarP is like CountVar only take a shorthand for the flag name. 
-func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { - flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) - flag.NoOptDefVal = "+1" -} - -// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set -func CountVar(p *int, name string, usage string) { - CommandLine.CountVar(p, name, usage) -} - -// CountVarP is like CountVar only take a shorthand for the flag name. -func CountVarP(p *int, name, shorthand string, usage string) { - CommandLine.CountVarP(p, name, shorthand, usage) -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line -func (f *FlagSet) Count(name string, usage string) *int { - p := new(int) - f.CountVarP(p, name, "", usage) - return p -} - -// CountP is like Count only takes a shorthand for the flag name. -func (f *FlagSet) CountP(name, shorthand string, usage string) *int { - p := new(int) - f.CountVarP(p, name, shorthand, usage) - return p -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line -func Count(name string, usage string) *int { - return CommandLine.CountP(name, "", usage) -} - -// CountP is like Count only takes a shorthand for the flag name. 
-func CountP(name, shorthand string, usage string) *int { - return CommandLine.CountP(name, shorthand, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/duration.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/duration.go deleted file mode 100644 index e9debef88ee..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/duration.go +++ /dev/null @@ -1,86 +0,0 @@ -package pflag - -import ( - "time" -) - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Type() string { - return "duration" -} - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -func durationConv(sval string) (interface{}, error) { - return time.ParseDuration(sval) -} - -// GetDuration return the duration value of a flag with the given name -func (f *FlagSet) GetDuration(name string) (time.Duration, error) { - val, err := f.getFlagType(name, "duration", durationConv) - if err != nil { - return 0, err - } - return val.(time.Duration), nil -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, "", value, usage) - return p -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, shorthand, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(name string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, "", value, usage) -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. 
-func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/duration_slice.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/duration_slice.go deleted file mode 100644 index 52c6b6dc104..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/duration_slice.go +++ /dev/null @@ -1,128 +0,0 @@ -package pflag - -import ( - "fmt" - "strings" - "time" -) - -// -- durationSlice Value -type durationSliceValue struct { - value *[]time.Duration - changed bool -} - -func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue { - dsv := new(durationSliceValue) - dsv.value = p - *dsv.value = val - return dsv -} - -func (s *durationSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]time.Duration, len(ss)) - for i, d := range ss { - var err error - out[i], err = time.ParseDuration(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *durationSliceValue) Type() string { - return "durationSlice" -} - -func (s *durationSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%s", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func durationSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []time.Duration{}, nil - } - ss := strings.Split(val, ",") - out := make([]time.Duration, len(ss)) - for i, d := range ss { - var err error - out[i], err = time.ParseDuration(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetDurationSlice returns the []time.Duration value of a flag with the given name -func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) { - val, err := f.getFlagType(name, "durationSlice", durationSliceConv) - if err != nil { - return []time.Duration{}, err - } - return val.([]time.Duration), nil -} - -// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string. -// The argument p points to a []time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { - f.VarP(newDurationSliceValue(value, p), name, "", usage) -} - -// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { - f.VarP(newDurationSliceValue(value, p), name, shorthand, usage) -} - -// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string. -// The argument p points to a duration[] variable in which to store the value of the flag. 
-func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { - CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage) -} - -// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. -func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { - CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage) -} - -// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a []time.Duration variable that stores the value of the flag. -func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { - p := []time.Duration{} - f.DurationSliceVarP(&p, name, "", value, usage) - return &p -} - -// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { - p := []time.Duration{} - f.DurationSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a []time.Duration variable that stores the value of the flag. -func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { - return CommandLine.DurationSliceP(name, "", value, usage) -} - -// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. 
-func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { - return CommandLine.DurationSliceP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/flag.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/flag.go deleted file mode 100644 index 9beeda8ecca..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/flag.go +++ /dev/null @@ -1,1227 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the GNU extensions to the POSIX recommendations -for command-line options. See -http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -Usage: - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - - import flag "github.com/spf13/pflag" - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - var ip = flag.Int("flagname", 1234, "help message for flagname") -If you like, you can bind the flag to a variable using the Var() functions. 
- var flagvar int - func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") - } -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - flag.Var(&flagVal, "name", "help message for flagname") -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - flag.Parse() -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - fmt.Println("ip has value ", *ip) - fmt.Println("flagvar has value ", flagvar) - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - var ip = flag.IntP("flagname", "f", 1234, "help message") - var flagvar bool - func init() { - flag.BoolVarP("boolname", "b", true, "help message") - } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -Command line flag syntax: - --flag // boolean flags only - --flag=x - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags. - // boolean flags - -f - -abc - // non-boolean flags - -n 1234 - -Ifile - // mixed - -abcs "hello" - -abcn1234 - -Flag parsing stops after the terminator "--". 
Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. -*/ -package pflag - -import ( - "bytes" - "errors" - goflag "flag" - "fmt" - "io" - "os" - "sort" - "strings" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. -var ErrHelp = errors.New("pflag: help requested") - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -const ( - // ContinueOnError will return an err from Parse() if an error is found - ContinueOnError ErrorHandling = iota - // ExitOnError will call os.Exit(2) if an error is found when parsing - ExitOnError - // PanicOnError will panic() if an error is found when parsing flags - PanicOnError -) - -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { - // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags - UnknownFlags bool -} - -// NormalizedName is a flag name that has been normalized according to rules -// for the FlagSet (e.g. making '-' and '_' equivalent). -type NormalizedName string - -// A FlagSet represents a set of defined flags. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. 
- Usage func() - - // SortFlags is used to indicate, if user wants to have sorted flags in - // help/usage messages. - SortFlags bool - - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist - - name string - parsed bool - actual map[NormalizedName]*Flag - orderedActual []*Flag - sortedActual []*Flag - formal map[NormalizedName]*Flag - orderedFormal []*Flag - sortedFormal []*Flag - shorthands map[byte]*Flag - args []string // arguments after flags - argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- - errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor - interspersed bool // allow interspersed option/non-option args - normalizeNameFunc func(f *FlagSet, name string) NormalizedName - - addedGoFlagSets []*goflag.FlagSet -} - -// A Flag represents the state of a flag. -type Flag struct { - Name string // name as it appears on command line - Shorthand string // one-letter abbreviated flag - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message - Changed bool // If the user set the value (or if left to default) - NoOptDefVal string // default value (as text); if the flag is on the command line without any options - Deprecated string // If this flag is deprecated, this string is the new or now thing to use - Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text - ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use - Annotations map[string][]string // used by cobra.Command bash autocomple code -} - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -type Value interface { - String() string - Set(string) error - Type() string -} - -// sortFlags returns the flags as a slice in lexicographical sorted order. 
-func sortFlags(flags map[NormalizedName]*Flag) []*Flag { - list := make(sort.StringSlice, len(flags)) - i := 0 - for k := range flags { - list[i] = string(k) - i++ - } - list.Sort() - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[NormalizedName(name)] - } - return result -} - -// SetNormalizeFunc allows you to add a function which can translate flag names. -// Flags added to the FlagSet will be translated and then when anything tries to -// look up the flag that will also be translated. So it would be possible to create -// a flag named "getURL" and have it translated to "geturl". A user could then pass -// "--getUrl" which may also be translated to "geturl" and everything will work. -func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { - f.normalizeNameFunc = n - f.sortedFormal = f.sortedFormal[:0] - for fname, flag := range f.formal { - nname := f.normalizeFlagName(flag.Name) - if fname == nname { - continue - } - flag.Name = string(nname) - delete(f.formal, fname) - f.formal[nname] = flag - if _, set := f.actual[fname]; set { - delete(f.actual, fname) - f.actual[nname] = flag - } - } -} - -// GetNormalizeFunc returns the previously set NormalizeFunc of a function which -// does no translation, if not set previously. -func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { - if f.normalizeNameFunc != nil { - return f.normalizeNameFunc - } - return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } -} - -func (f *FlagSet) normalizeFlagName(name string) NormalizedName { - n := f.GetNormalizeFunc() - return n(f, name) -} - -func (f *FlagSet) out() io.Writer { - if f.output == nil { - return os.Stderr - } - return f.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. 
-func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output -} - -// VisitAll visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func (f *FlagSet) VisitAll(fn func(*Flag)) { - if len(f.formal) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.formal) != len(f.sortedFormal) { - f.sortedFormal = sortFlags(f.formal) - } - flags = f.sortedFormal - } else { - flags = f.orderedFormal - } - - for _, flag := range flags { - fn(flag) - } -} - -// HasFlags returns a bool to indicate if the FlagSet has any flags defined. -func (f *FlagSet) HasFlags() bool { - return len(f.formal) > 0 -} - -// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags -// that are not hidden. -func (f *FlagSet) HasAvailableFlags() bool { - for _, flag := range f.formal { - if !flag.Hidden { - return true - } - } - return false -} - -// VisitAll visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. -func (f *FlagSet) Visit(fn func(*Flag)) { - if len(f.actual) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.actual) != len(f.sortedActual) { - f.sortedActual = sortFlags(f.actual) - } - flags = f.sortedActual - } else { - flags = f.orderedActual - } - - for _, flag := range flags { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. 
-func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.lookup(f.normalizeFlagName(name)) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -// It panics, if len(name) > 1. -func (f *FlagSet) ShorthandLookup(name string) *Flag { - if name == "" { - return nil - } - if len(name) > 1 { - msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - c := name[0] - return f.shorthands[c] -} - -// lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) lookup(name NormalizedName) *Flag { - return f.formal[name] -} - -// func to return a given type for a given flag name -func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { - flag := f.Lookup(name) - if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) - return nil, err - } - - if flag.Value.Type() != ftype { - err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type()) - return nil, err - } - - sval := flag.Value.String() - result, err := convFunc(sval) - if err != nil { - return nil, err - } - return result, nil -} - -// ArgsLenAtDash will return the length of f.Args at the moment when a -- was -// found during arg parsing. This allows your program to know which args were -// before the -- and which came after. -func (f *FlagSet) ArgsLenAtDash() int { - return f.argsLenAtDash -} - -// MarkDeprecated indicated that a flag is deprecated in your program. It will -// continue to function but will not show up in help or usage messages. Using -// this flag will also print the given usageMessage. 
-func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.Deprecated = usageMessage - flag.Hidden = true - return nil -} - -// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your -// program. It will continue to function but will not show up in help or usage -// messages. Using this flag will also print the given usageMessage. -func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.ShorthandDeprecated = usageMessage - return nil -} - -// MarkHidden sets a flag to 'hidden' in your program. It will continue to -// function but will not show up in help or usage messages. -func (f *FlagSet) MarkHidden(name string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - flag.Hidden = true - return nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.Lookup(name) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -func ShorthandLookup(name string) *Flag { - return CommandLine.ShorthandLookup(name) -} - -// Set sets the value of the named flag. 
-func (f *FlagSet) Set(name, value string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - - err := flag.Value.Set(value) - if err != nil { - var flagName string - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", flag.Name) - } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) - } - - if !flag.Changed { - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[normalName] = flag - f.orderedActual = append(f.orderedActual, flag) - - flag.Changed = true - } - - if flag.Deprecated != "" { - fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) - } - return nil -} - -// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet. -// This is sometimes used by spf13/cobra programs which want to generate additional -// bash completion information. -func (f *FlagSet) SetAnnotation(name, key string, values []string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - flag.Annotations[key] = values - return nil -} - -// Changed returns true if the flag was explicitly set during Parse() and false -// otherwise -func (f *FlagSet) Changed(name string) bool { - flag := f.Lookup(name) - // If a flag doesn't exist, it wasn't changed.... - if flag == nil { - return false - } - return flag.Changed -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. 
-func (f *FlagSet) PrintDefaults() { - usages := f.FlagUsages() - fmt.Fprint(f.out(), usages) -} - -// defaultIsZeroValue returns true if the default value for this flag represents -// a zero value. -func (f *Flag) defaultIsZeroValue() bool { - switch f.Value.(type) { - case boolFlag: - return f.DefValue == "false" - case *durationValue: - // Beginning in Go 1.7, duration zero values are "0s" - return f.DefValue == "0" || f.DefValue == "0s" - case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value: - return f.DefValue == "0" - case *stringValue: - return f.DefValue == "" - case *ipValue, *ipMaskValue, *ipNetValue: - return f.DefValue == "" - case *intSliceValue, *stringSliceValue, *stringArrayValue: - return f.DefValue == "[]" - default: - switch f.Value.String() { - case "false": - return true - case "": - return true - case "": - return true - case "0": - return true - } - return false - } -} - -// UnquoteUsage extracts a back-quoted name from the usage -// string for a flag and returns it and the un-quoted usage. -// Given "a `name` to show" it returns ("name", "a name to show"). -// If there are no back quotes, the name is an educated guess of the -// type of the flag's value, or the empty string if the flag is boolean. -func UnquoteUsage(flag *Flag) (name string, usage string) { - // Look for a back-quoted name, but avoid the strings package. - usage = flag.Usage - for i := 0; i < len(usage); i++ { - if usage[i] == '`' { - for j := i + 1; j < len(usage); j++ { - if usage[j] == '`' { - name = usage[i+1 : j] - usage = usage[:i] + name + usage[j+1:] - return name, usage - } - } - break // Only one back quote; use type name. 
- } - } - - name = flag.Value.Type() - switch name { - case "bool": - name = "" - case "float64": - name = "float" - case "int64": - name = "int" - case "uint64": - name = "uint" - case "stringSlice": - name = "strings" - case "intSlice": - name = "ints" - case "uintSlice": - name = "uints" - case "boolSlice": - name = "bools" - } - - return -} - -// Splits the string `s` on whitespace into an initial substring up to -// `i` runes in length and the remainder. Will go `slop` over `i` if -// that encompasses the entire string (which allows the caller to -// avoid short orphan words on the final line). -func wrapN(i, slop int, s string) (string, string) { - if i+slop > len(s) { - return s, "" - } - - w := strings.LastIndexAny(s[:i], " \t\n") - if w <= 0 { - return s, "" - } - nlPos := strings.LastIndex(s[:i], "\n") - if nlPos > 0 && nlPos < w { - return s[:nlPos], s[nlPos+1:] - } - return s[:w], s[w+1:] -} - -// Wraps the string `s` to a maximum width `w` with leading indent -// `i`. The first line is not indented (this is assumed to be done by -// caller). Pass `w` == 0 to do no wrapping -func wrap(i, w int, s string) string { - if w == 0 { - return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1) - } - - // space between indent i and end of line width w into which - // we should wrap the text. - wrap := w - i - - var r, l string - - // Not enough space for sensible wrapping. Wrap as a block on - // the next line instead. - if wrap < 24 { - i = 16 - wrap = w - i - r += "\n" + strings.Repeat(" ", i) - } - // If still not enough space then don't even try to wrap. - if wrap < 24 { - return strings.Replace(s, "\n", r, -1) - } - - // Try to avoid short orphan words on the final line, by - // allowing wrapN to go a bit over if that would fit in the - // remainder of the line. 
- slop := 5 - wrap = wrap - slop - - // Handle first line, which is indented by the caller (or the - // special case above) - l, s = wrapN(wrap, slop, s) - r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1) - - // Now wrap the rest - for s != "" { - var t string - - t, s = wrapN(wrap, slop, s) - r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1) - } - - return r - -} - -// FlagUsagesWrapped returns a string containing the usage information -// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no -// wrapping) -func (f *FlagSet) FlagUsagesWrapped(cols int) string { - buf := new(bytes.Buffer) - - lines := make([]string, 0, len(f.formal)) - - maxlen := 0 - f.VisitAll(func(flag *Flag) { - if flag.Hidden { - return - } - - line := "" - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) - } else { - line = fmt.Sprintf(" --%s", flag.Name) - } - - varname, usage := UnquoteUsage(flag) - if varname != "" { - line += " " + varname - } - if flag.NoOptDefVal != "" { - switch flag.Value.Type() { - case "string": - line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": - if flag.NoOptDefVal != "true" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - case "count": - if flag.NoOptDefVal != "+1" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - default: - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - } - - // This special character will be replaced with spacing once the - // correct alignment is calculated - line += "\x00" - if len(line) > maxlen { - maxlen = len(line) - } - - line += usage - if !flag.defaultIsZeroValue() { - if flag.Value.Type() == "string" { - line += fmt.Sprintf(" (default %q)", flag.DefValue) - } else { - line += fmt.Sprintf(" (default %s)", flag.DefValue) - } - } - if len(flag.Deprecated) != 0 { - line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated) - } - - lines = append(lines, 
line) - }) - - for _, line := range lines { - sidx := strings.Index(line, "\x00") - spacing := strings.Repeat(" ", maxlen-sidx) - // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx - fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) - } - - return buf.String() -} - -// FlagUsages returns a string containing the usage information for all flags in -// the FlagSet -func (f *FlagSet) FlagUsages() string { - return f.FlagUsagesWrapped(0) -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) - f.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -// By default it prints a simple header and calls PrintDefaults; for details about the -// format of the output and how to control it, see the documentation for PrintDefaults. -var Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// NFlag returns the number of flags that have been set. -func (f *FlagSet) NFlag() int { return len(f.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { - return "" - } - return f.args[i] -} - -// Arg returns the i'th command-line argument. 
Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (f *FlagSet) NArg() int { return len(f.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. -func (f *FlagSet) Args() []string { return f.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (f *FlagSet) Var(value Value, name string, usage string) { - f.VarP(value, name, "", usage) -} - -// VarPF is like VarP, but returns the flag created -func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { - // Remember the default value as a string; it won't change. - flag := &Flag{ - Name: name, - Shorthand: shorthand, - Usage: usage, - Value: value, - DefValue: value.String(), - } - f.AddFlag(flag) - return flag -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { - f.VarPF(value, name, shorthand, usage) -} - -// AddFlag will add the flag to the FlagSet -func (f *FlagSet) AddFlag(flag *Flag) { - normalizedFlagName := f.normalizeFlagName(flag.Name) - - _, alreadyThere := f.formal[normalizedFlagName] - if alreadyThere { - msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if f.formal == nil { - f.formal = make(map[NormalizedName]*Flag) - } - - flag.Name = string(normalizedFlagName) - f.formal[normalizedFlagName] = flag - f.orderedFormal = append(f.orderedFormal, flag) - - if flag.Shorthand == "" { - return - } - if len(flag.Shorthand) > 1 { - msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - if f.shorthands == nil { - f.shorthands = make(map[byte]*Flag) - } - c := flag.Shorthand[0] - used, alreadyThere := f.shorthands[c] - if alreadyThere { - msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - f.shorthands[c] = flag -} - -// AddFlagSet adds one FlagSet to another. If a flag is already present in f -// the flag from newSet will be ignored. -func (f *FlagSet) AddFlagSet(newSet *FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(flag *Flag) { - if f.Lookup(flag.Name) == nil { - f.AddFlag(flag) - } - }) -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. 
For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func Var(value Value, name string, usage string) { - CommandLine.VarP(value, name, "", usage) -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func VarP(value Value, name, shorthand, usage string) { - CommandLine.VarP(value, name, shorthand, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.out(), err) - f.usage() - } - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) - } else { - f.Usage() - } -} - -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) -func stripUnknownFlagValue(args []string) []string { - if len(args) == 0 { - //--unknown - return args - } - - first := args[0] - if len(first) > 0 && first[0] == '-' { - //--unknown --next-flag ... - return args - } - - //--unknown arg ... (args will be arg ...) 
- if len(args) > 1 { - return args[1:] - } - return nil -} - -func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - name := s[2:] - if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) - return - } - - split := strings.SplitN(name, "=", 2) - name = split[0] - flag, exists := f.formal[f.normalizeFlagName(name)] - - if !exists { - switch { - case name == "help": - f.usage() - return a, ErrHelp - case f.ParseErrorsWhitelist.UnknownFlags: - // --unknown=unknownval arg ... - // we do not want to lose arg in this case - if len(split) >= 2 { - return a, nil - } - - return stripUnknownFlagValue(a), nil - default: - err = f.failf("unknown flag: --%s", name) - return - } - } - - var value string - if len(split) == 2 { - // '--flag=arg' - value = split[1] - } else if flag.NoOptDefVal != "" { - // '--flag' (arg was optional) - value = flag.NoOptDefVal - } else if len(a) > 0 { - // '--flag arg' - value = a[0] - a = a[1:] - } else { - // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) - return - } - - err = fn(flag, value) - if err != nil { - f.failf(err.Error()) - } - return -} - -func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { - outArgs = args - - if strings.HasPrefix(shorthands, "test.") { - return - } - - outShorts = shorthands[1:] - c := shorthands[0] - - flag, exists := f.shorthands[c] - if !exists { - switch { - case c == 'h': - f.usage() - err = ErrHelp - return - case f.ParseErrorsWhitelist.UnknownFlags: - // '-f=arg arg ...' 
- // we do not want to lose arg in this case - if len(shorthands) > 2 && shorthands[1] == '=' { - outShorts = "" - return - } - - outArgs = stripUnknownFlagValue(outArgs) - return - default: - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) - return - } - } - - var value string - if len(shorthands) > 2 && shorthands[1] == '=' { - // '-f=arg' - value = shorthands[2:] - outShorts = "" - } else if flag.NoOptDefVal != "" { - // '-f' (arg was optional) - value = flag.NoOptDefVal - } else if len(shorthands) > 1 { - // '-farg' - value = shorthands[1:] - outShorts = "" - } else if len(args) > 0 { - // '-f arg' - value = args[0] - outArgs = args[1:] - } else { - // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) - return - } - - if flag.ShorthandDeprecated != "" { - fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) - } - - err = fn(flag, value) - if err != nil { - f.failf(err.Error()) - } - return -} - -func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - shorthands := s[1:] - - // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv"). - for len(shorthands) > 0 { - shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) - if err != nil { - return - } - } - - return -} - -func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { - for len(args) > 0 { - s := args[0] - args = args[1:] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - if !f.interspersed { - f.args = append(f.args, s) - f.args = append(f.args, args...) - return nil - } - f.args = append(f.args, s) - continue - } - - if s[1] == '-' { - if len(s) == 2 { // "--" terminates the flags - f.argsLenAtDash = len(f.args) - f.args = append(f.args, args...) 
- break - } - args, err = f.parseLongArg(s, args, fn) - } else { - args, err = f.parseShortArg(s, args, fn) - } - if err != nil { - return - } - } - return -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. -func (f *FlagSet) Parse(arguments []string) error { - if f.addedGoFlagSets != nil { - for _, goFlagSet := range f.addedGoFlagSets { - goFlagSet.Parse(nil) - } - } - f.parsed = true - - if len(arguments) < 0 { - return nil - } - - f.args = make([]string, 0, len(arguments)) - - set := func(flag *Flag, value string) error { - return f.Set(flag.Name, value) - } - - err := f.parseArgs(arguments, set) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - fmt.Println(err) - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -type parseFunc func(flag *Flag, value string) error - -// ParseAll parses flag definitions from the argument list, which should not -// include the command name. The arguments for fn are flag and value. Must be -// called after all flags in the FlagSet are defined and before flags are -// accessed by the program. The return value will be ErrHelp if -help was set -// but not defined. -func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error { - f.parsed = true - f.args = make([]string, 0, len(arguments)) - - err := f.parseArgs(arguments, fn) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// Parsed reports whether f.Parse has been called. -func (f *FlagSet) Parsed() bool { - return f.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. 
Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// ParseAll parses the command-line flags from os.Args[1:] and called fn for each. -// The arguments for fn are flag and value. Must be called after all flags are -// defined and before flags are accessed by the program. -func ParseAll(fn func(flag *Flag, value string) error) { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.ParseAll(os.Args[1:], fn) -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. -func SetInterspersed(interspersed bool) { - CommandLine.SetInterspersed(interspersed) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name, -// error handling property and SortFlags set to true. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - argsLenAtDash: -1, - interspersed: true, - SortFlags: true, - } - return f -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. -func (f *FlagSet) SetInterspersed(interspersed bool) { - f.interspersed = interspersed -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. 
-func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling - f.argsLenAtDash = -1 -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/float32.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/float32.go deleted file mode 100644 index a243f81f7fb..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/float32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- float32 Value -type float32Value float32 - -func newFloat32Value(val float32, p *float32) *float32Value { - *p = val - return (*float32Value)(p) -} - -func (f *float32Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = float32Value(v) - return err -} - -func (f *float32Value) Type() string { - return "float32" -} - -func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } - -func float32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseFloat(sval, 32) - if err != nil { - return 0, err - } - return float32(v), nil -} - -// GetFloat32 return the float32 value of a flag with the given name -func (f *FlagSet) GetFloat32(name string) (float32, error) { - val, err := f.getFlagType(name, "float32", float32Conv) - if err != nil { - return 0, err - } - return val.(float32), nil -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func Float32Var(p *float32, name string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, "", value, usage) - return p -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, shorthand, value, usage) - return p -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func Float32(name string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, "", value, usage) -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
-func Float32P(name, shorthand string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/float64.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/float64.go deleted file mode 100644 index 04b5492a7d3..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/float64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Type() string { - return "float64" -} - -func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } - -func float64Conv(sval string) (interface{}, error) { - return strconv.ParseFloat(sval, 64) -} - -// GetFloat64 return the float64 value of a flag with the given name -func (f *FlagSet) GetFloat64(name string) (float64, error) { - val, err := f.getFlagType(name, "float64", float64Conv) - if err != nil { - return 0, err - } - return val.(float64), nil -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. 
-// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, name string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, "", value, usage) - return p -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, shorthand, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(name string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, "", value, usage) -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. 
-func Float64P(name, shorthand string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/golangflag.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/golangflag.go deleted file mode 100644 index d3dd72b7fee..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/golangflag.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - goflag "flag" - "reflect" - "strings" -) - -// flagValueWrapper implements pflag.Value around a flag.Value. The main -// difference here is the addition of the Type method that returns a string -// name of the type. As this is generally unknown, we approximate that with -// reflection. -type flagValueWrapper struct { - inner goflag.Value - flagType string -} - -// We are just copying the boolFlag interface out of goflag as that is what -// they use to decide if a flag should get "true" when no arg is given. -type goBoolFlag interface { - goflag.Value - IsBoolFlag() bool -} - -func wrapFlagValue(v goflag.Value) Value { - // If the flag.Value happens to also be a pflag.Value, just use it directly. 
- if pv, ok := v.(Value); ok { - return pv - } - - pv := &flagValueWrapper{ - inner: v, - } - - t := reflect.TypeOf(v) - if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { - t = t.Elem() - } - - pv.flagType = strings.TrimSuffix(t.Name(), "Value") - return pv -} - -func (v *flagValueWrapper) String() string { - return v.inner.String() -} - -func (v *flagValueWrapper) Set(s string) error { - return v.inner.Set(s) -} - -func (v *flagValueWrapper) Type() string { - return v.flagType -} - -// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag -// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei -// with both `-v` and `--v` in flags. If the golang flag was more than a single -// character (ex: `verbose`) it will only be accessible via `--verbose` -func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { - // Remember the default value as a string; it won't change. - flag := &Flag{ - Name: goflag.Name, - Usage: goflag.Usage, - Value: wrapFlagValue(goflag.Value), - // Looks like golang flags don't set DefValue correctly :-( - //DefValue: goflag.DefValue, - DefValue: goflag.Value.String(), - } - // Ex: if the golang flag was -v, allow both -v and --v to work - if len(flag.Name) == 1 { - flag.Shorthand = flag.Name - } - if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { - flag.NoOptDefVal = "true" - } - return flag -} - -// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet -func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { - if f.Lookup(goflag.Name) != nil { - return - } - newflag := PFlagFromGoFlag(goflag) - f.AddFlag(newflag) -} - -// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet -func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(goflag *goflag.Flag) { - f.AddGoFlag(goflag) - }) - if f.addedGoFlagSets == nil { - f.addedGoFlagSets = make([]*goflag.FlagSet, 0) - } - f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) -} 
diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int.go deleted file mode 100644 index 1474b89df66..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Type() string { - return "int" -} - -func (i *intValue) String() string { return strconv.Itoa(int(*i)) } - -func intConv(sval string) (interface{}, error) { - return strconv.Atoi(sval) -} - -// GetInt return the int value of a flag with the given name -func (f *FlagSet) GetInt(name string) (int, error) { - val, err := f.getFlagType(name, "int", intConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { - f.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { - f.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, name string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. 
-func IntVarP(p *int, name, shorthand string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(name string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, "", value, usage) - return p -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, shorthand, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(name string, value int, usage string) *int { - return CommandLine.IntP(name, "", value, usage) -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
-func IntP(name, shorthand string, value int, usage string) *int { - return CommandLine.IntP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int16.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int16.go deleted file mode 100644 index f1a01d05e69..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int16 Value -type int16Value int16 - -func newInt16Value(val int16, p *int16) *int16Value { - *p = val - return (*int16Value)(p) -} - -func (i *int16Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 16) - *i = int16Value(v) - return err -} - -func (i *int16Value) Type() string { - return "int16" -} - -func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 16) - if err != nil { - return 0, err - } - return int16(v), nil -} - -// GetInt16 returns the int16 value of a flag with the given name -func (f *FlagSet) GetInt16(name string) (int16, error) { - val, err := f.getFlagType(name, "int16", int16Conv) - if err != nil { - return 0, err - } - return val.(int16), nil -} - -// Int16Var defines an int16 flag with specified name, default value, and usage string. -// The argument p points to an int16 variable in which to store the value of the flag. -func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) { - f.VarP(newInt16Value(value, p), name, "", usage) -} - -// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) { - f.VarP(newInt16Value(value, p), name, shorthand, usage) -} - -// Int16Var defines an int16 flag with specified name, default value, and usage string. 
-// The argument p points to an int16 variable in which to store the value of the flag. -func Int16Var(p *int16, name string, value int16, usage string) { - CommandLine.VarP(newInt16Value(value, p), name, "", usage) -} - -// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. -func Int16VarP(p *int16, name, shorthand string, value int16, usage string) { - CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage) -} - -// Int16 defines an int16 flag with specified name, default value, and usage string. -// The return value is the address of an int16 variable that stores the value of the flag. -func (f *FlagSet) Int16(name string, value int16, usage string) *int16 { - p := new(int16) - f.Int16VarP(p, name, "", value, usage) - return p -} - -// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 { - p := new(int16) - f.Int16VarP(p, name, shorthand, value, usage) - return p -} - -// Int16 defines an int16 flag with specified name, default value, and usage string. -// The return value is the address of an int16 variable that stores the value of the flag. -func Int16(name string, value int16, usage string) *int16 { - return CommandLine.Int16P(name, "", value, usage) -} - -// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. 
-func Int16P(name, shorthand string, value int16, usage string) *int16 { - return CommandLine.Int16P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int32.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int32.go deleted file mode 100644 index 9b95944f0fe..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int32 Value -type int32Value int32 - -func newInt32Value(val int32, p *int32) *int32Value { - *p = val - return (*int32Value)(p) -} - -func (i *int32Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 32) - *i = int32Value(v) - return err -} - -func (i *int32Value) Type() string { - return "int32" -} - -func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 32) - if err != nil { - return 0, err - } - return int32(v), nil -} - -// GetInt32 return the int32 value of a flag with the given name -func (f *FlagSet) GetInt32(name string) (int32, error) { - val, err := f.getFlagType(name, "int32", int32Conv) - if err != nil { - return 0, err - } - return val.(int32), nil -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. 
-// The argument p points to an int32 variable in which to store the value of the flag. -func Int32Var(p *int32, name string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, "", value, usage) - return p -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, shorthand, value, usage) - return p -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func Int32(name string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, "", value, usage) -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. 
-func Int32P(name, shorthand string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int64.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int64.go deleted file mode 100644 index 0026d781d9f..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Type() string { - return "int64" -} - -func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int64Conv(sval string) (interface{}, error) { - return strconv.ParseInt(sval, 0, 64) -} - -// GetInt64 return the int64 value of a flag with the given name -func (f *FlagSet) GetInt64(name string) (int64, error) { - val, err := f.getFlagType(name, "int64", int64Conv) - if err != nil { - return 0, err - } - return val.(int64), nil -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. 
-func Int64Var(p *int64, name string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, "", value, usage) - return p -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, shorthand, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(name string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, "", value, usage) -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. 
-func Int64P(name, shorthand string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int8.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int8.go deleted file mode 100644 index 4da92228e63..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int8 Value -type int8Value int8 - -func newInt8Value(val int8, p *int8) *int8Value { - *p = val - return (*int8Value)(p) -} - -func (i *int8Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 8) - *i = int8Value(v) - return err -} - -func (i *int8Value) Type() string { - return "int8" -} - -func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 8) - if err != nil { - return 0, err - } - return int8(v), nil -} - -// GetInt8 return the int8 value of a flag with the given name -func (f *FlagSet) GetInt8(name string) (int8, error) { - val, err := f.getFlagType(name, "int8", int8Conv) - if err != nil { - return 0, err - } - return val.(int8), nil -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. 
-// The argument p points to an int8 variable in which to store the value of the flag. -func Int8Var(p *int8, name string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, "", value, usage) - return p -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, shorthand, value, usage) - return p -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func Int8(name string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, "", value, usage) -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
-func Int8P(name, shorthand string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int_slice.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int_slice.go deleted file mode 100644 index 1e7c9edde95..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/int_slice.go +++ /dev/null @@ -1,128 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- intSlice Value -type intSliceValue struct { - value *[]int - changed bool -} - -func newIntSliceValue(val []int, p *[]int) *intSliceValue { - isv := new(intSliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *intSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *intSliceValue) Type() string { - return "intSlice" -} - -func (s *intSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func intSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int{}, nil - } - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetIntSlice return the []int value of a flag with the given name -func (f *FlagSet) GetIntSlice(name string) ([]int, error) { - val, err := f.getFlagType(name, "intSlice", intSliceConv) - if err != nil { - return []int{}, err - } - return val.([]int), nil -} - -// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. -// The argument p points to a []int variable in which to store the value of the flag. -func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSliceVar defines a int[] flag with specified name, default value, and usage string. -// The argument p points to a int[] variable in which to store the value of the flag. -func IntSliceVar(p *[]int, name string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. 
-func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, "", value, usage) - return &p -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func IntSlice(name string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, "", value, usage) -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. 
-func IntSliceP(name, shorthand string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ip.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ip.go deleted file mode 100644 index 3d414ba69fe..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ip.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// -- net.IP value -type ipValue net.IP - -func newIPValue(val net.IP, p *net.IP) *ipValue { - *p = val - return (*ipValue)(p) -} - -func (i *ipValue) String() string { return net.IP(*i).String() } -func (i *ipValue) Set(s string) error { - ip := net.ParseIP(strings.TrimSpace(s)) - if ip == nil { - return fmt.Errorf("failed to parse IP: %q", s) - } - *i = ipValue(ip) - return nil -} - -func (i *ipValue) Type() string { - return "ip" -} - -func ipConv(sval string) (interface{}, error) { - ip := net.ParseIP(sval) - if ip != nil { - return ip, nil - } - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) -} - -// GetIP return the net.IP value of a flag with the given name -func (f *FlagSet) GetIP(name string) (net.IP, error) { - val, err := f.getFlagType(name, "ip", ipConv) - if err != nil { - return nil, err - } - return val.(net.IP), nil -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func IPVar(p *net.IP, name string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. -func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, "", value, usage) - return p -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, shorthand, value, usage) - return p -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func IP(name string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, "", value, usage) -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. 
-func IPP(name, shorthand string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ip_slice.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ip_slice.go deleted file mode 100644 index 7dd196fe3fb..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ip_slice.go +++ /dev/null @@ -1,148 +0,0 @@ -package pflag - -import ( - "fmt" - "io" - "net" - "strings" -) - -// -- ipSlice Value -type ipSliceValue struct { - value *[]net.IP - changed bool -} - -func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { - ipsv := new(ipSliceValue) - ipsv.value = p - *ipsv.value = val - return ipsv -} - -// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. -// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. -func (s *ipSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse ip values into slice - out := make([]net.IP, 0, len(ipStrSlice)) - for _, ipStr := range ipStrSlice { - ip := net.ParseIP(strings.TrimSpace(ipStr)) - if ip == nil { - return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) - } - out = append(out, ip) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *ipSliceValue) Type() string { - return "ipSlice" -} - -// String defines a "native" format for this net.IP slice flag value. 
-func (s *ipSliceValue) String() string { - - ipStrSlice := make([]string, len(*s.value)) - for i, ip := range *s.value { - ipStrSlice[i] = ip.String() - } - - out, _ := writeAsCSV(ipStrSlice) - - return "[" + out + "]" -} - -func ipSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry - if len(val) == 0 { - return []net.IP{}, nil - } - ss := strings.Split(val, ",") - out := make([]net.IP, len(ss)) - for i, sval := range ss { - ip := net.ParseIP(strings.TrimSpace(sval)) - if ip == nil { - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) - } - out[i] = ip - } - return out, nil -} - -// GetIPSlice returns the []net.IP value of a flag with the given name -func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) { - val, err := f.getFlagType(name, "ipSlice", ipSliceConv) - if err != nil { - return []net.IP{}, err - } - return val.([]net.IP), nil -} - -// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. 
-func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of that flag. -func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, "", value, usage) - return &p -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of the flag. -func IPSlice(name string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, "", value, usage) -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. 
-func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ipmask.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ipmask.go deleted file mode 100644 index 5bd44bd21d2..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ipmask.go +++ /dev/null @@ -1,122 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strconv" -) - -// -- net.IPMask value -type ipMaskValue net.IPMask - -func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { - *p = val - return (*ipMaskValue)(p) -} - -func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } -func (i *ipMaskValue) Set(s string) error { - ip := ParseIPv4Mask(s) - if ip == nil { - return fmt.Errorf("failed to parse IP mask: %q", s) - } - *i = ipMaskValue(ip) - return nil -} - -func (i *ipMaskValue) Type() string { - return "ipMask" -} - -// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). -// This function should really belong to the net package. 
-func ParseIPv4Mask(s string) net.IPMask { - mask := net.ParseIP(s) - if mask == nil { - if len(s) != 8 { - return nil - } - // net.IPMask.String() actually outputs things like ffffff00 - // so write a horrible parser for that as well :-( - m := []int{} - for i := 0; i < 4; i++ { - b := "0x" + s[2*i:2*i+2] - d, err := strconv.ParseInt(b, 0, 0) - if err != nil { - return nil - } - m = append(m, int(d)) - } - s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) - mask = net.ParseIP(s) - if mask == nil { - return nil - } - } - return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) -} - -func parseIPv4Mask(sval string) (interface{}, error) { - mask := ParseIPv4Mask(sval) - if mask == nil { - return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) - } - return mask, nil -} - -// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name -func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { - val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) - if err != nil { - return nil, err - } - return val.(net.IPMask), nil -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. 
-func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, "", value, usage) - return p -} - -// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, shorthand, value, usage) - return p -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func IPMask(name string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, "", value, usage) -} - -// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. 
-func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ipnet.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ipnet.go deleted file mode 100644 index e2c1b8bcd53..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/ipnet.go +++ /dev/null @@ -1,98 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// IPNet adapts net.IPNet for use as a flag. -type ipNetValue net.IPNet - -func (ipnet ipNetValue) String() string { - n := net.IPNet(ipnet) - return n.String() -} - -func (ipnet *ipNetValue) Set(value string) error { - _, n, err := net.ParseCIDR(strings.TrimSpace(value)) - if err != nil { - return err - } - *ipnet = ipNetValue(*n) - return nil -} - -func (*ipNetValue) Type() string { - return "ipNet" -} - -func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { - *p = val - return (*ipNetValue)(p) -} - -func ipNetConv(sval string) (interface{}, error) { - _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) - if err == nil { - return *n, nil - } - return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) -} - -// GetIPNet return the net.IPNet value of a flag with the given name -func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { - val, err := f.getFlagType(name, "ipNet", ipNetConv) - if err != nil { - return net.IPNet{}, err - } - return val.(net.IPNet), nil -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. -func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, "", value, usage) - return p -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, shorthand, value, usage) - return p -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func IPNet(name string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, "", value, usage) -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. 
-func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string.go deleted file mode 100644 index 04e0a26ff7f..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string.go +++ /dev/null @@ -1,80 +0,0 @@ -package pflag - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} -func (s *stringValue) Type() string { - return "string" -} - -func (s *stringValue) String() string { return string(*s) } - -func stringConv(sval string) (interface{}, error) { - return sval, nil -} - -// GetString return the string value of a flag with the given name -func (f *FlagSet) GetString(name string) (string, error) { - val, err := f.getFlagType(name, "string", stringConv) - if err != nil { - return "", err - } - return val.(string), nil -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { - f.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { - f.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. 
-func StringVar(p *string, name string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func StringVarP(p *string, name, shorthand string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(name string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, "", value, usage) - return p -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, shorthand, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(name string, value string, usage string) *string { - return CommandLine.StringP(name, "", value, usage) -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. 
-func StringP(name, shorthand string, value string, usage string) *string { - return CommandLine.StringP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_array.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_array.go deleted file mode 100644 index fa7bc60187a..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_array.go +++ /dev/null @@ -1,103 +0,0 @@ -package pflag - -// -- stringArray Value -type stringArrayValue struct { - value *[]string - changed bool -} - -func newStringArrayValue(val []string, p *[]string) *stringArrayValue { - ssv := new(stringArrayValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func (s *stringArrayValue) Set(val string) error { - if !s.changed { - *s.value = []string{val} - s.changed = true - } else { - *s.value = append(*s.value, val) - } - return nil -} - -func (s *stringArrayValue) Type() string { - return "stringArray" -} - -func (s *stringArrayValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func stringArrayConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a array with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringArray return the []string value of a flag with the given name -func (f *FlagSet) GetStringArray(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringArray", stringArrayConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. 
-func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func StringArrayVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, "", value, usage) - return &p -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringArray defines a string flag with specified name, default value, and usage string. 
-// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func StringArray(name string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, "", value, usage) -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. -func StringArrayP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_slice.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_slice.go deleted file mode 100644 index 0cd3ccc083e..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_slice.go +++ /dev/null @@ -1,149 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "strings" -) - -// -- stringSlice Value -type stringSliceValue struct { - value *[]string - changed bool -} - -func newStringSliceValue(val []string, p *[]string) *stringSliceValue { - ssv := new(stringSliceValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -func writeAsCSV(vals []string) (string, error) { - b := &bytes.Buffer{} - w := csv.NewWriter(b) - err := w.Write(vals) - if err != nil { - return "", err - } - w.Flush() - return strings.TrimSuffix(b.String(), "\n"), nil -} - -func (s *stringSliceValue) Set(val string) error { - v, err := readAsCSV(val) - if err != nil { - return err - } - if !s.changed { - *s.value = v - } else { - *s.value = append(*s.value, v...) 
- } - s.changed = true - return nil -} - -func (s *stringSliceValue) Type() string { - return "stringSlice" -} - -func (s *stringSliceValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func stringSliceConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a slice with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringSlice return the []string value of a flag with the given name -func (f *FlagSet) GetStringSlice(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringSlice", stringSliceConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" -ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
-// For example: -// --ss="v1,v2" -ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func StringSliceVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" -ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, "", value, usage) - return &p -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
-// For example: -// --ss="v1,v2" -ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func StringSlice(name string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, "", value, usage) -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. -func StringSliceP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_to_int.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_to_int.go deleted file mode 100644 index 5ceda3965df..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_to_int.go +++ /dev/null @@ -1,149 +0,0 @@ -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -// -- stringToInt Value -type stringToIntValue struct { - value *map[string]int - changed bool -} - -func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue { - ssv := new(stringToIntValue) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToIntValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make(map[string]int, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.Atoi(kv[1]) - if err != nil { - return err - } - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToIntValue) Type() string { - return "stringToInt" -} - -func (s *stringToIntValue) String() string { - var buf bytes.Buffer - i := 0 - for k, v := range *s.value { - if i > 0 { - buf.WriteRune(',') - } - buf.WriteString(k) - buf.WriteRune('=') - 
buf.WriteString(strconv.Itoa(v)) - i++ - } - return "[" + buf.String() + "]" -} - -func stringToIntConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]int{}, nil - } - ss := strings.Split(val, ",") - out := make(map[string]int, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.Atoi(kv[1]) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetStringToInt return the map[string]int value of a flag with the given name -func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) { - val, err := f.getFlagType(name, "stringToInt", stringToIntConv) - if err != nil { - return map[string]int{}, err - } - return val.(map[string]int), nil -} - -// StringToIntVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]int variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { - f.VarP(newStringToIntValue(value, p), name, "", usage) -} - -// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { - f.VarP(newStringToIntValue(value, p), name, shorthand, usage) -} - -// StringToIntVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]int variable in which to store the value of the flag. 
-// The value of each argument will not try to be separated by comma -func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { - CommandLine.VarP(newStringToIntValue(value, p), name, "", usage) -} - -// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. -func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { - CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage) -} - -// StringToInt defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int { - p := map[string]int{} - f.StringToIntVarP(&p, name, "", value, usage) - return &p -} - -// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { - p := map[string]int{} - f.StringToIntVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToInt defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToInt(name string, value map[string]int, usage string) *map[string]int { - return CommandLine.StringToIntP(name, "", value, usage) -} - -// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. 
-func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { - return CommandLine.StringToIntP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_to_string.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_to_string.go deleted file mode 100644 index 890a01afc03..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/string_to_string.go +++ /dev/null @@ -1,160 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "fmt" - "strings" -) - -// -- stringToString Value -type stringToStringValue struct { - value *map[string]string - changed bool -} - -func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue { - ssv := new(stringToStringValue) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToStringValue) Set(val string) error { - var ss []string - n := strings.Count(val, "=") - switch n { - case 0: - return fmt.Errorf("%s must be formatted as key=value", val) - case 1: - ss = append(ss, strings.Trim(val, `"`)) - default: - r := csv.NewReader(strings.NewReader(val)) - var err error - ss, err = r.Read() - if err != nil { - return err - } - } - - out := make(map[string]string, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - out[kv[0]] = kv[1] - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToStringValue) Type() string { - return "stringToString" -} - -func (s *stringToStringValue) String() string { - records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { - records = append(records, k+"="+v) - } - - var buf bytes.Buffer - w := csv.NewWriter(&buf) - if err := w.Write(records); err != nil { - panic(err) 
- } - w.Flush() - return "[" + strings.TrimSpace(buf.String()) + "]" -} - -func stringToStringConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]string{}, nil - } - r := csv.NewReader(strings.NewReader(val)) - ss, err := r.Read() - if err != nil { - return nil, err - } - out := make(map[string]string, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - out[kv[0]] = kv[1] - } - return out, nil -} - -// GetStringToString return the map[string]string value of a flag with the given name -func (f *FlagSet) GetStringToString(name string) (map[string]string, error) { - val, err := f.getFlagType(name, "stringToString", stringToStringConv) - if err != nil { - return map[string]string{}, err - } - return val.(map[string]string), nil -} - -// StringToStringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { - f.VarP(newStringToStringValue(value, p), name, "", usage) -} - -// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { - f.VarP(newStringToStringValue(value, p), name, shorthand, usage) -} - -// StringToStringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]string variable in which to store the value of the flag. 
-// The value of each argument will not try to be separated by comma -func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { - CommandLine.VarP(newStringToStringValue(value, p), name, "", usage) -} - -// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. -func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { - CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage) -} - -// StringToString defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string { - p := map[string]string{} - f.StringToStringVarP(&p, name, "", value, usage) - return &p -} - -// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { - p := map[string]string{} - f.StringToStringVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToString defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToString(name string, value map[string]string, usage string) *map[string]string { - return CommandLine.StringToStringP(name, "", value, usage) -} - -// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. 
-func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { - return CommandLine.StringToStringP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint.go deleted file mode 100644 index dcbc2b758c3..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Type() string { - return "uint" -} - -func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uintConv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 0) - if err != nil { - return 0, err - } - return uint(v), nil -} - -// GetUint return the uint value of a flag with the given name -func (f *FlagSet) GetUint(name string) (uint, error) { - val, err := f.getFlagType(name, "uint", uintConv) - if err != nil { - return 0, err - } - return val.(uint), nil -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. 
-// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, name string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func UintVarP(p *uint, name, shorthand string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(name string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, "", value, usage) - return p -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, shorthand, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(name string, value uint, usage string) *uint { - return CommandLine.UintP(name, "", value, usage) -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. 
-func UintP(name, shorthand string, value uint, usage string) *uint { - return CommandLine.UintP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint16.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint16.go deleted file mode 100644 index 7e9914eddde..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint16 value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} - -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - -func (i *uint16Value) Type() string { - return "uint16" -} - -func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 16) - if err != nil { - return 0, err - } - return uint16(v), nil -} - -// GetUint16 return the uint16 value of a flag with the given name -func (f *FlagSet) GetUint16(name string) (uint16, error) { - val, err := f.getFlagType(name, "uint16", uint16Conv) - if err != nil { - return 0, err - } - return val.(uint16), nil -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. 
-// The argument p points to a uint variable in which to store the value of the flag. -func Uint16Var(p *uint16, name string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, "", value, usage) - return p -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, shorthand, value, usage) - return p -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint16(name string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, "", value, usage) -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. 
-func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint32.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint32.go deleted file mode 100644 index d8024539bf6..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint32 value -type uint32Value uint32 - -func newUint32Value(val uint32, p *uint32) *uint32Value { - *p = val - return (*uint32Value)(p) -} - -func (i *uint32Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 32) - *i = uint32Value(v) - return err -} - -func (i *uint32Value) Type() string { - return "uint32" -} - -func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 32) - if err != nil { - return 0, err - } - return uint32(v), nil -} - -// GetUint32 return the uint32 value of a flag with the given name -func (f *FlagSet) GetUint32(name string) (uint32, error) { - val, err := f.getFlagType(name, "uint32", uint32Conv) - if err != nil { - return 0, err - } - return val.(uint32), nil -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func Uint32Var(p *uint32, name string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, "", value, usage) - return p -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, shorthand, value, usage) - return p -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func Uint32(name string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, "", value, usage) -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. 
-func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint64.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint64.go deleted file mode 100644 index f62240f2cea..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint64.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Type() string { - return "uint64" -} - -func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint64Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 64) - if err != nil { - return 0, err - } - return uint64(v), nil -} - -// GetUint64 return the uint64 value of a flag with the given name -func (f *FlagSet) GetUint64(name string) (uint64, error) { - val, err := f.getFlagType(name, "uint64", uint64Conv) - if err != nil { - return 0, err - } - return val.(uint64), nil -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, name string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, "", value, usage) - return p -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, shorthand, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(name string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, "", value, usage) -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. 
-func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint8.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint8.go deleted file mode 100644 index bb0e83c1f6d..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint8 Value -type uint8Value uint8 - -func newUint8Value(val uint8, p *uint8) *uint8Value { - *p = val - return (*uint8Value)(p) -} - -func (i *uint8Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 8) - *i = uint8Value(v) - return err -} - -func (i *uint8Value) Type() string { - return "uint8" -} - -func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 8) - if err != nil { - return 0, err - } - return uint8(v), nil -} - -// GetUint8 return the uint8 value of a flag with the given name -func (f *FlagSet) GetUint8(name string) (uint8, error) { - val, err := f.getFlagType(name, "uint8", uint8Conv) - if err != nil { - return 0, err - } - return val.(uint8), nil -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. 
-// The argument p points to a uint8 variable in which to store the value of the flag. -func Uint8Var(p *uint8, name string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, "", value, usage) - return p -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, shorthand, value, usage) - return p -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func Uint8(name string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, "", value, usage) -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. 
-func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint_slice.go b/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint_slice.go deleted file mode 100644 index edd94c600af..00000000000 --- a/config-connector/tests/ccs-test/vendor/github.com/spf13/pflag/uint_slice.go +++ /dev/null @@ -1,126 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- uintSlice Value -type uintSliceValue struct { - value *[]uint - changed bool -} - -func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue { - uisv := new(uintSliceValue) - uisv.value = p - *uisv.value = val - return uisv -} - -func (s *uintSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return err - } - out[i] = uint(u) - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *uintSliceValue) Type() string { - return "uintSlice" -} - -func (s *uintSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func uintSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []uint{}, nil - } - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return nil, err - } - out[i] = uint(u) - } - return out, nil -} - -// GetUintSlice returns the []uint value of a flag with the given name. 
-func (f *FlagSet) GetUintSlice(name string) ([]uint, error) { - val, err := f.getFlagType(name, "uintSlice", uintSliceConv) - if err != nil { - return []uint{}, err - } - return val.([]uint), nil -} - -// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string. -// The argument p points to a []uint variable in which to store the value of the flag. -func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSliceVar defines a uint[] flag with specified name, default value, and usage string. -// The argument p points to a uint[] variable in which to store the value of the flag. -func UintSliceVar(p *[]uint, name string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, "", value, usage) - return &p -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func UintSlice(name string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, "", value, usage) -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, shorthand, value, usage) -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/.travis.yml b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/.travis.yml deleted file mode 100644 index 9f556934d8b..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - tip - -go_import_path: gopkg.in/yaml.v2 diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/LICENSE b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index 8dada3edaf5..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fbf6f8..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/NOTICE b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/NOTICE deleted file mode 100644 index 866d74a7ad7..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/README.md b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/README.md deleted file mode 100644 index b50c6e87755..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. 
- -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! 
-b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/apic.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/apic.go deleted file mode 100644 index 1f7e87e6727..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/apic.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. 
-func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. 
-func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. 
-// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. 
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. 
-func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. 
-// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. 
-// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. 
-// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. 
-// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. 
-// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/decode.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/decode.go deleted file mode 100644 index e4e56e28e0e..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/decode.go +++ /dev/null @@ -1,775 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - // For an alias node, alias holds the resolved alias. - alias *node - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. - -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node - doneInit bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. 
-func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. -func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. 
- return nil - default: - panic("attempted to parse unknown event: " + p.event.typ.String()) - } -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - n.children = append(n.children, p.parse()) - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - n.alias = p.doc.anchors[n.value] - if n.alias == nil { - failf("unknown anchor '%s' referenced", n.value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. 
- -type decoder struct { - doc *node - aliases map[*node]bool - mapType reflect.Type - terrors []string - strict bool -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[*node]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. 
-func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n] = true - good = d.unmarshal(n.alias, out) - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == yaml_BINARY_TAG { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. 
- text = []byte(n.value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - return true - } - if resolved != nil { - out.SetString(n.value) - return true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else if tag == yaml_TIMESTAMP_TAG { - // It looks like a timestamp but for backward compatibility - // reasons we set it as a string, so that code that unmarshals - // timestamp-like values into interface{} will continue to - // see a string and not a time.Time. - // TODO(v3) Drop this. - out.Set(reflect.ValueOf(n.value)) - } else { - out.Set(reflect.ValueOf(resolved)) - } - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - 
return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? - elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - return true - } - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - d.setMapIndex(n.children[i+1], out, k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { - if d.strict && out.MapIndex(k) != zeroValue { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) 
- return - } - out.SetMapIndex(k, v) -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - var doneFields []bool - if d.strict { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.strict { - if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := 
reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - d.setMapIndex(n.children[i+1], inlineMap, name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/emitterc.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/emitterc.go deleted file mode 100644 index a1c2cc52627..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. 
-func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. 
-func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return 
false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return 
yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if 
yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. 
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. 
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. 
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. 
-func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { 
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. 
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' 
{ - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. 
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if 
i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. 
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', 
')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < 
len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = 
put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if 
!is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if 
!yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/encode.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index 0ee738e11b6..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,390 +0,0 @@ -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// jsonNumber is the interface of the encoding/json.Number datatype. -// Repeating the interface here avoids a dependency on encoding/json, and also -// supports other libraries like jsoniter, which use a similar datatype with -// the same interface. Detecting this interface is useful when dealing with -// structures containing json.Number, which is a string under the hood. The -// encoder should prefer the use of Int64(), Float64() and string(), in that -// order, when encoding this type. -type jsonNumber interface { - Float64() (float64, error) - Int64() (int64, error) - String() string -} - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - // doneInit holds whether the initial stream_start_event has been - // emitted. 
- doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch m := iface.(type) { - case jsonNumber: - integer, err := m.Int64() - if err == nil { - // In this case the json.Number is a valid int64 - in = reflect.ValueOf(integer) - break - } - float, err := m.Float64() - if err == nil { - // In this case the json.Number is a valid float64 - in = reflect.ValueOf(float) - break - } - // fallback case - no number could be obtained - in = reflect.ValueOf(m.String()) - case time.Time, *time.Time: - // Although time.Time implements 
TextMarshaler, - // we don't want to treat it as a string for YAML - // purposes because YAML has special support for - // timestamps. - case Marshaler: - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - case encoding.TextMarshaler: - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.Type() == ptrTimeType { - e.timev(tag, in.Elem()) - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - if in.Type() == timeType { - e.timev(tag, in) - } else { - e.structv(tag, in) - } - case reflect.Slice, reflect.Array: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e 
*encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. 
-func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = yaml_BINARY_TAG - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. 
- switch { - case strings.Contains(s, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/go.mod b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/go.mod deleted file mode 100644 index 1934e876945..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module "gopkg.in/yaml.v2" - 
-require ( - "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 -) diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/parserc.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/parserc.go deleted file mode 100644 index 81d05dfe573..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. 
-func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. 
- if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else 
{ - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) - break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. 
- end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, 
start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == 
yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = 
parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
- } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = 
parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. 
-func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. 
-func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/readerc.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/readerc.go deleted file mode 100644 index 7c1f5fac3db..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. -const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. 
- for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. 
- size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. 
- buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. 
- octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. 
- if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. 
- if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 
1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. 
- for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/resolve.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/resolve.go deleted file mode 100644 index 6c151db6fbd..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/resolve.go +++ /dev/null @@ -1,258 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = 
"tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - case yaml_FLOAT_TAG: - if rtag == yaml_INT_TAG { - switch v := out.(type) { - case int64: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - case int: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. 
- floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(in) - if ok { - return yaml_TIMESTAMP_TAG, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - } - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - return yaml_STR_TAG, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. 
-func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. 
- i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/scannerc.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/scannerc.go deleted file mode 100644 index 077fd1dd2d4..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/scannerc.go +++ /dev/null @@ -1,2696 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. 
-// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. 
An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. 
We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). 
However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. 
If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. 
-func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. 
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? 
- if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? 
- if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. 
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - return true -} - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // Increase the flow level. - parser.flow_level++ - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - return true -} - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. 
In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. 
- parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. 
- start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. 
- yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if simple_key.possible { - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. 
- if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. 
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. 
- var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! 
tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. 
- if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. 
- if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. 
- if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' 
{ - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' 
|| - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. 
- octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. 
- increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. 
- var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) 
- } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. 
-func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. - if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. 
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. 
- if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. 
- if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. 
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? 
- if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/sorter.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/sorter.go deleted file mode 100644 index 4c45e660a8f..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/sorter.go +++ /dev/null @@ -1,113 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether 
it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/writerc.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index a2dde608cb7..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. 
- if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/yaml.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/yaml.go deleted file mode 100644 index de85aa4cdb7..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/yaml.go +++ /dev/null @@ -1,466 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. 
-type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members, or mapping -// keys that are duplicates, will result in -// an error. -func UnmarshalStrict(in []byte, out interface{}) (err error) { - return unmarshal(in, out, true) -} - -// A Decorder reads and decodes YAML values from an input stream. 
-type Decoder struct { - strict bool - parser *parser -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// SetStrict sets whether strict decoding behaviour is enabled when -// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. -func (dec *Decoder) SetStrict(strict bool) { - dec.strict = strict -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder(dec.strict) - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { - defer handleErr(&err) - d := newDecoder(strict) - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. 
Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. -// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. 
-// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. 
- InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. - Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in 
struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. 
-type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/yamlh.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/yamlh.go deleted file mode 100644 index e25cee563be..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/yamlh.go +++ /dev/null @@ -1,738 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. 
- yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. 
-const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. 
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). 
- value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. -) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). 
- version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. 
- yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. 
- version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. 
-) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return 
"yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. 
- tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
- yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? 
- line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. 
- anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/yamlprivateh.go deleted file mode 100644 index 8110ce3c37a..00000000000 --- a/config-connector/tests/ccs-test/vendor/gopkg.in/yaml.v2/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. 
-func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. 
-func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. 
- if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/config-connector/tests/ccs-test/vendor/modules.txt b/config-connector/tests/ccs-test/vendor/modules.txt deleted file mode 100644 index b60b7d258aa..00000000000 --- a/config-connector/tests/ccs-test/vendor/modules.txt +++ /dev/null @@ -1,12 +0,0 @@ -# github.com/ghodss/yaml v1.0.0 -github.com/ghodss/yaml -# github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b -github.com/golang/glog -# github.com/inconshreveable/mousetrap v1.0.0 -github.com/inconshreveable/mousetrap -# github.com/spf13/cobra v1.0.0 -github.com/spf13/cobra -# github.com/spf13/pflag v1.0.3 -github.com/spf13/pflag -# gopkg.in/yaml.v2 v2.2.2 -gopkg.in/yaml.v2 diff --git a/config-connector/tests/testcases/environments.template b/config-connector/tests/testcases/environments.template deleted file mode 100644 index 6f46fbba2db..00000000000 --- a/config-connector/tests/testcases/environments.template +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# RANDOM_ID should not be defined in the environments file. 
-BILLING_ACCOUNT_ID: ${BILLING_ACCOUNT_ID?} -BUCKET_NAME: ${BUCKET_NAME?} -DEFAULT_NAMESPACE: ${DEFAULT_NAMESPACE?} -DEFAULT_PROJECT_ID: ${DEFAULT_PROJECT_ID?} -FOLDER_ID: ${FOLDER_ID?} -HOST_PROJECT_ID: ${HOST_PROJECT_ID?} -IAM_MEMBER: ${IAM_MEMBER?} -INSTANCE_NAME: ${INSTANCE_NAME?} -ORG_ID: ${ORG_ID?} -# PASSWORD needs to be base64-encoded. -PASSWORD: ${PASSWORD?} -PROJECT_ID: ${PROJECT_ID?} -SERVICE_PROJECT_ID: ${SERVICE_PROJECT_ID?} -USERNAME_1: ${USERNAME_1?} -USERNAME_2: ${USERNAME_2?} -USERNAME_3: ${USERNAME_3?} diff --git a/config-connector/tests/testcases/iam/kpt/folder-iam/original_values.yaml b/config-connector/tests/testcases/iam/kpt/folder-iam/original_values.yaml deleted file mode 100644 index 4272535883e..00000000000 --- a/config-connector/tests/testcases/iam/kpt/folder-iam/original_values.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -folder-id: ${FOLDER_ID?} -iam-member: ${IAM_MEMBER?} diff --git a/config-connector/tests/testcases/iam/kpt/folder-iam/required_fields_only.yaml b/config-connector/tests/testcases/iam/kpt/folder-iam/required_fields_only.yaml deleted file mode 100644 index 12f4f439fc1..00000000000 --- a/config-connector/tests/testcases/iam/kpt/folder-iam/required_fields_only.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -folder-id: $FOLDER_ID -iam-member: $IAM_MEMBER diff --git a/config-connector/tests/testcases/iam/kpt/kms-crypto-key/original_values.yaml b/config-connector/tests/testcases/iam/kpt/kms-crypto-key/original_values.yaml deleted file mode 100644 index 709008ad9f7..00000000000 --- a/config-connector/tests/testcases/iam/kpt/kms-crypto-key/original_values.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: ${IAM_MEMBER?} diff --git a/config-connector/tests/testcases/iam/kpt/kms-crypto-key/required_fields_only.yaml b/config-connector/tests/testcases/iam/kpt/kms-crypto-key/required_fields_only.yaml deleted file mode 100644 index 909fa7446ba..00000000000 --- a/config-connector/tests/testcases/iam/kpt/kms-crypto-key/required_fields_only.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: $IAM_MEMBER diff --git a/config-connector/tests/testcases/iam/kpt/kms-key-ring/original_values.yaml b/config-connector/tests/testcases/iam/kpt/kms-key-ring/original_values.yaml deleted file mode 100644 index 709008ad9f7..00000000000 --- a/config-connector/tests/testcases/iam/kpt/kms-key-ring/original_values.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: ${IAM_MEMBER?} diff --git a/config-connector/tests/testcases/iam/kpt/kms-key-ring/required_fields_only.yaml b/config-connector/tests/testcases/iam/kpt/kms-key-ring/required_fields_only.yaml deleted file mode 100644 index 909fa7446ba..00000000000 --- a/config-connector/tests/testcases/iam/kpt/kms-key-ring/required_fields_only.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: $IAM_MEMBER diff --git a/config-connector/tests/testcases/iam/kpt/member-iam/original_values.yaml b/config-connector/tests/testcases/iam/kpt/member-iam/original_values.yaml deleted file mode 100644 index 2c66915f0b1..00000000000 --- a/config-connector/tests/testcases/iam/kpt/member-iam/original_values.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -project-id: ${PROJECT_ID?} diff --git a/config-connector/tests/testcases/iam/kpt/member-iam/required_fields_only.yaml b/config-connector/tests/testcases/iam/kpt/member-iam/required_fields_only.yaml deleted file mode 100644 index aa68f7a8b79..00000000000 --- a/config-connector/tests/testcases/iam/kpt/member-iam/required_fields_only.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -project-id: $DEFAULT_PROJECT_ID diff --git a/config-connector/tests/testcases/iam/kpt/project-iam/original_values.yaml b/config-connector/tests/testcases/iam/kpt/project-iam/original_values.yaml deleted file mode 100644 index 3a2238aa06c..00000000000 --- a/config-connector/tests/testcases/iam/kpt/project-iam/original_values.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -member: ${IAM_MEMBER?} -project-id: ${PROJECT_ID?} diff --git a/config-connector/tests/testcases/iam/kpt/project-iam/required_fields_only.yaml b/config-connector/tests/testcases/iam/kpt/project-iam/required_fields_only.yaml deleted file mode 100644 index 149467bb46d..00000000000 --- a/config-connector/tests/testcases/iam/kpt/project-iam/required_fields_only.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -member: $IAM_MEMBER -project-id: $DEFAULT_PROJECT_ID diff --git a/config-connector/tests/testcases/iam/kpt/pubsub-subscription/original_values.yaml b/config-connector/tests/testcases/iam/kpt/pubsub-subscription/original_values.yaml deleted file mode 100644 index 709008ad9f7..00000000000 --- a/config-connector/tests/testcases/iam/kpt/pubsub-subscription/original_values.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: ${IAM_MEMBER?} diff --git a/config-connector/tests/testcases/iam/kpt/pubsub-subscription/required_fields_only.yaml b/config-connector/tests/testcases/iam/kpt/pubsub-subscription/required_fields_only.yaml deleted file mode 100644 index 909fa7446ba..00000000000 --- a/config-connector/tests/testcases/iam/kpt/pubsub-subscription/required_fields_only.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: $IAM_MEMBER diff --git a/config-connector/tests/testcases/iam/kpt/pubsub-topic/original_values.yaml b/config-connector/tests/testcases/iam/kpt/pubsub-topic/original_values.yaml deleted file mode 100644 index 709008ad9f7..00000000000 --- a/config-connector/tests/testcases/iam/kpt/pubsub-topic/original_values.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: ${IAM_MEMBER?} diff --git a/config-connector/tests/testcases/iam/kpt/pubsub-topic/required_fields_only.yaml b/config-connector/tests/testcases/iam/kpt/pubsub-topic/required_fields_only.yaml deleted file mode 100644 index 909fa7446ba..00000000000 --- a/config-connector/tests/testcases/iam/kpt/pubsub-topic/required_fields_only.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: $IAM_MEMBER diff --git a/config-connector/tests/testcases/iam/kpt/service-account/original_values.yaml b/config-connector/tests/testcases/iam/kpt/service-account/original_values.yaml deleted file mode 100644 index 709008ad9f7..00000000000 --- a/config-connector/tests/testcases/iam/kpt/service-account/original_values.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: ${IAM_MEMBER?} diff --git a/config-connector/tests/testcases/iam/kpt/service-account/required_fields_only.yaml b/config-connector/tests/testcases/iam/kpt/service-account/required_fields_only.yaml deleted file mode 100644 index 909fa7446ba..00000000000 --- a/config-connector/tests/testcases/iam/kpt/service-account/required_fields_only.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: $IAM_MEMBER diff --git a/config-connector/tests/testcases/iam/kpt/storage-bucket-iam/original_values.yaml b/config-connector/tests/testcases/iam/kpt/storage-bucket-iam/original_values.yaml deleted file mode 100644 index bd9a2a1ea0c..00000000000 --- a/config-connector/tests/testcases/iam/kpt/storage-bucket-iam/original_values.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -bucket-name: ${BUCKET_NAME?} -iam-member: ${IAM_MEMBER?} diff --git a/config-connector/tests/testcases/iam/kpt/storage-bucket-iam/required_fields_only.yaml b/config-connector/tests/testcases/iam/kpt/storage-bucket-iam/required_fields_only.yaml deleted file mode 100644 index 9c866cd34ce..00000000000 --- a/config-connector/tests/testcases/iam/kpt/storage-bucket-iam/required_fields_only.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -bucket-name: $BUCKET_NAME -iam-member: $IAM_MEMBER diff --git a/config-connector/tests/testcases/iam/kpt/subnet/original_values.yaml b/config-connector/tests/testcases/iam/kpt/subnet/original_values.yaml deleted file mode 100644 index 709008ad9f7..00000000000 --- a/config-connector/tests/testcases/iam/kpt/subnet/original_values.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: ${IAM_MEMBER?} diff --git a/config-connector/tests/testcases/iam/kpt/subnet/required_fields_only.yaml b/config-connector/tests/testcases/iam/kpt/subnet/required_fields_only.yaml deleted file mode 100644 index 909fa7446ba..00000000000 --- a/config-connector/tests/testcases/iam/kpt/subnet/required_fields_only.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -iam-member: $IAM_MEMBER diff --git a/config-connector/tests/testcases/projects/kpt/owned-project/original_values.yaml b/config-connector/tests/testcases/projects/kpt/owned-project/original_values.yaml deleted file mode 100644 index 31c6d3db4ed..00000000000 --- a/config-connector/tests/testcases/projects/kpt/owned-project/original_values.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-billing-account: ${BILLING_ACCOUNT_ID?} -folder-id: ${FOLDER_ID?} -iam-member: ${IAM_MEMBER?} -project-id: ${PROJECT_ID?} diff --git a/config-connector/tests/testcases/projects/kpt/owned-project/required_fields_only.yaml b/config-connector/tests/testcases/projects/kpt/owned-project/required_fields_only.yaml deleted file mode 100644 index 3611415d598..00000000000 --- a/config-connector/tests/testcases/projects/kpt/owned-project/required_fields_only.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -billing-account: $BILLING_ACCOUNT_ID -folder-id: $FOLDER_ID -iam-member: $IAM_MEMBER -project-id: $PROJECT_ID-$RANDOM_ID diff --git a/config-connector/tests/testcases/projects/kpt/project-services/original_values.yaml b/config-connector/tests/testcases/projects/kpt/project-services/original_values.yaml deleted file mode 100644 index cda286e03c5..00000000000 --- a/config-connector/tests/testcases/projects/kpt/project-services/original_values.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -project-id: "${PROJECT_ID?}" diff --git a/config-connector/tests/testcases/projects/kpt/project-services/required_fields_only.yaml b/config-connector/tests/testcases/projects/kpt/project-services/required_fields_only.yaml deleted file mode 100644 index aa68f7a8b79..00000000000 --- a/config-connector/tests/testcases/projects/kpt/project-services/required_fields_only.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -project-id: $DEFAULT_PROJECT_ID diff --git a/config-connector/tests/testcases/projects/kpt/shared-vpc/original_values.yaml b/config-connector/tests/testcases/projects/kpt/shared-vpc/original_values.yaml deleted file mode 100644 index c7d3f141ef7..00000000000 --- a/config-connector/tests/testcases/projects/kpt/shared-vpc/original_values.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -billing-account: ${BILLING_ACCOUNT_ID?} -default-namespace: ${DEFAULT_NAMESPACE?} -host-project: ${HOST_PROJECT_ID?} -org-id: ${ORG_ID?} -service-project: ${SERVICE_PROJECT_ID?} diff --git a/config-connector/tests/testcases/projects/kpt/shared-vpc/required_fields_only.yaml b/config-connector/tests/testcases/projects/kpt/shared-vpc/required_fields_only.yaml deleted file mode 100644 index 0de6f643c2b..00000000000 --- a/config-connector/tests/testcases/projects/kpt/shared-vpc/required_fields_only.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-billing-account: $BILLING_ACCOUNT_ID -default-namespace: $DEFAULT_NAMESPACE -host-project: $HOST_PROJECT_ID-$RANDOM_ID -org-id: $ORG_ID -service-project: $SERVICE_PROJECT_ID-$RANDOM_ID diff --git a/config-connector/tests/testcases/projects/kpt/simple-project/original_values.yaml b/config-connector/tests/testcases/projects/kpt/simple-project/original_values.yaml deleted file mode 100644 index 576b21b7ffd..00000000000 --- a/config-connector/tests/testcases/projects/kpt/simple-project/original_values.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -billing-account: ${BILLING_ACCOUNT_ID?} -org-id: ${ORG_ID?} -project-id: ${PROJECT_ID?} diff --git a/config-connector/tests/testcases/projects/kpt/simple-project/required_fields_only.yaml b/config-connector/tests/testcases/projects/kpt/simple-project/required_fields_only.yaml deleted file mode 100644 index 9d191c4279a..00000000000 --- a/config-connector/tests/testcases/projects/kpt/simple-project/required_fields_only.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -billing-account: $BILLING_ACCOUNT_ID -org-id: $ORG_ID -project-id: $PROJECT_ID-$RANDOM_ID diff --git a/config-connector/tests/testcases/sql/kpt/mysql-ha/original_values.yaml b/config-connector/tests/testcases/sql/kpt/mysql-ha/original_values.yaml deleted file mode 100644 index a1633cc890e..00000000000 --- a/config-connector/tests/testcases/sql/kpt/mysql-ha/original_values.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-instance-name: mysql-ha-solution -test-pw: ${PASSWORD_1?} -test2-pw: ${PASSWORD_2?} -test3-pw: ${PASSWORD_3?} diff --git a/config-connector/tests/testcases/sql/kpt/mysql-ha/required_fields_with_sql_instance_name.yaml b/config-connector/tests/testcases/sql/kpt/mysql-ha/required_fields_with_sql_instance_name.yaml deleted file mode 100644 index d5a382e7fcf..00000000000 --- a/config-connector/tests/testcases/sql/kpt/mysql-ha/required_fields_with_sql_instance_name.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -instance-name: $INSTANCE_NAME-$RANDOM_ID -test-pw: $PASSWORD -test2-pw: $PASSWORD -test3-pw: $PASSWORD diff --git a/config-connector/tests/testcases/sql/kpt/mysql-public/original_values.yaml b/config-connector/tests/testcases/sql/kpt/mysql-public/original_values.yaml deleted file mode 100644 index b427f8e5f33..00000000000 --- a/config-connector/tests/testcases/sql/kpt/mysql-public/original_values.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -instance-name: mysql-public-solution -password: ${PASSWORD?} diff --git a/config-connector/tests/testcases/sql/kpt/mysql-public/required_fields_with_sql_instance_name.yaml b/config-connector/tests/testcases/sql/kpt/mysql-public/required_fields_with_sql_instance_name.yaml deleted file mode 100644 index ce745261037..00000000000 --- a/config-connector/tests/testcases/sql/kpt/mysql-public/required_fields_with_sql_instance_name.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -instance-name: $INSTANCE_NAME-$RANDOM_ID -password: $PASSWORD diff --git a/config-connector/tests/testcases/sql/kpt/postgres-ha/original_values.yaml b/config-connector/tests/testcases/sql/kpt/postgres-ha/original_values.yaml deleted file mode 100644 index 1aba294229f..00000000000 --- a/config-connector/tests/testcases/sql/kpt/postgres-ha/original_values.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -instance-name: postgres-ha-solution -password-1: ${PASSWORD_1?} -password-2: ${PASSWORD_2?} -password-3: ${PASSWORD_3?} -username-1: ${USERNAME_1?} -username-2: ${USERNAME_2?} -username-3: ${USERNAME_3?} diff --git a/config-connector/tests/testcases/sql/kpt/postgres-ha/required_fields_with_sql_instance_name.yaml b/config-connector/tests/testcases/sql/kpt/postgres-ha/required_fields_with_sql_instance_name.yaml deleted file mode 100644 index 0fcd7591c05..00000000000 --- a/config-connector/tests/testcases/sql/kpt/postgres-ha/required_fields_with_sql_instance_name.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-instance-name: $INSTANCE_NAME-$RANDOM_ID -password-1: $PASSWORD -password-2: $PASSWORD -password-3: $PASSWORD -username-1: $USERNAME_1 -username-2: $USERNAME_2 -username-3: $USERNAME_3 diff --git a/config-connector/tests/testcases/sql/kpt/postgres-public/original_values.yaml b/config-connector/tests/testcases/sql/kpt/postgres-public/original_values.yaml deleted file mode 100644 index d82c7306b76..00000000000 --- a/config-connector/tests/testcases/sql/kpt/postgres-public/original_values.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -instance-name: postgres-public-solution -password: ${PASSWORD?} diff --git a/config-connector/tests/testcases/sql/kpt/postgres-public/required_fields_with_sql_instance_name.yaml b/config-connector/tests/testcases/sql/kpt/postgres-public/required_fields_with_sql_instance_name.yaml deleted file mode 100644 index ce745261037..00000000000 --- a/config-connector/tests/testcases/sql/kpt/postgres-public/required_fields_with_sql_instance_name.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -instance-name: $INSTANCE_NAME-$RANDOM_ID -password: $PASSWORD diff --git a/config-connector/tests/testcases/test_values.template b/config-connector/tests/testcases/test_values.template deleted file mode 100644 index 6913f02e364..00000000000 --- a/config-connector/tests/testcases/test_values.template +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/dm/.gitignore b/dm/.gitignore deleted file mode 100644 index da8b8b82a74..00000000000 --- a/dm/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -/.tox -/.coverage -/*.yaml -*.pyc -__pycache__ -/build -/dist -*.egg-info -/venv diff --git a/dm/CHANGELOG.md b/dm/CHANGELOG.md deleted file mode 100644 index cb79b8ce08f..00000000000 --- a/dm/CHANGELOG.md +++ /dev/null @@ -1,195 +0,0 @@ -# Cloud Foundation Toolkit Change Log - -All notable changes to this project will be documented in this file. - -## CFT Templates - -### 11.12.2020 - -*BREAKING CHANGE* -- The CFT maintener team decided to drop support for alpha features such as `actions`. 
In some cases this mean loosing functionalities. This is in due to improve the codebase maintanability. -- [#850](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/850) removes the use of `DM actions (alpha)` - -### 17.06.2020 - -- Extending the url_map template to support defaultUrlRedirect - -### 27.03.2020 - -- BugFix Unmanaged Instance Group template to support network as selfLink. [#616](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/616) - -### 10.03.2020 - -- Extending the IAM member binding template to support bindings on CloudFunctions with `gcp-types/cloudfunctions-v1:virtual.projects.locations.functions.iamMemberBinding`. [#591](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/591) -- Update project template fixing usage export bucket related bug. [#601](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/601) - -### 18.02.2020 - -- Update cloudsql template to support multiple instances in a deployment with the same names for databases and users [#573](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/573) (Breaking change) - -### 30.01.2020 - -- Update forwarding rule template and external loadbalancer to support labels [#551](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/551). Fixing tests - -### 21.01.2020 - -- Update on target proxy and external loadbalancer templates to properly support a list of sslCertificates -- Update on healthcheck template (v1.1.0) to support legacy and non-legacy healthchecks. 
[#542](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/542) - -### 16.01.2020 - -- Updated gke template to support setting for releaseChannel [#539](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/539) - -### 15.01.2020 - -- Updated cloud sql template to fix intermittent Python 3 warnings [#538](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/538) - -### 08.01.2020 - -- Updated gke template to support Python 3 [#531](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/531) - -### 23.12.2019 - -- Fixed template `cloud_router` schema [#524](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/524) - -### 13.12.2019 - -- Limited ports to use with TCP external LB template according to documentation [#514] (https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/514) - -### 13.12.2019 - -- Updated test triggers for Github. From now on every bats test will be triggered - -### 11.12.2019 - -- Updated logging sink configuration to export entries to a desired destination in external project [#77](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/496) -- Added Stackdriver Notification Channels template [#432](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/485) - -### 09.12.2019 Ho-ho-ho - -- SSL-Certificate template supports beta features (managed certificate). This update is backwards compatible. 
[#505](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/505) - -### 09.12.2019 - -- Added 'resource_policy' DM template [#497](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/497) - -### 05.12.2019 - -- Updated internal LB and external LB templates according to backend_service.py.schema change [#476](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/476) - -### 02.12.2019 - -- IAM Member template support bindings on types which implement `gcp-types/storage-v1:virtual.buckets.iamMemberBinding` like syntax. ( currently storage-v1.) - -### 25.11.2019 - -- In `cloud_sql.py`, added support for PostgreSQL 11 & fixed `ipAddress` output [#477](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/477) - -### 22.11.2019 - -- Fixed sharedVPC for GKE use case behaviour in 'project' DM template [#469](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/469) - -### 22.11.2019 - -- Cloud Build Trigger support for Github [#470](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/463) -- CI example for Github triggering Cloudbuild for PRs - -### 21.11.2019 - -- Added support for unified Stackdriver Kubernetes Engine Monitoring [#463](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/463) -- Add explicit dependencies to the 'iam_member' DM template to avoid fail, in case of a large amount of bindings (30+) [#443](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/443) - -### 18.11.2019 - -- The [GCS Bucket template](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/tree/ocsig-patch-storage1/dm/templates/gcs_bucket) supports gcp-types/storage-v1:virtual.buckets.iamMemberBinding. 
[#453](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/453) - -### 31.10.2019 - -- New helper template to use firewall with Google important IP ranges, stored in YAML [#370](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/370) - -### 29.10.2019 - -- Bigquery template schema supports single region as location - -### 25.10.2019 - -- Fixed examples for ELB to pass the new (strict) schema validation [#392](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/392) -- Added support for GKE on SharedVPC for the Project Factory and better visibility for the GKE Service Accounts [#385](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/385) -- Fixed IAM member binding schema to truly support the lack of project property. [#384](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/384) -- The CFT CLI (go version) support complex cross deployment references [#359](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/359) -- Added a Docker container for running bats tests on your local source code for template developers [#355](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/355) -- New exapmle: [Instance with a private IP](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/dm/templates/instance/examples/instance_private.yaml) [#346](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/346) -- New template: [Cloud Filestore](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/tree/master/dm/templates/cloud_filestore) [#348](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/348) - -### 17.09.2019 - -- New template: [Unamanged Instance Group](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/tree/master/dm/templates/unmanaged_instance_group) -- CFT Instance (DM) template support sourceInstanceTemplate property instead of properties of the instance 
[#330](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/330) -- New examples for CloudSQL with [private IP](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/dm/templates/cloud_sql/examples/cloud_sql_private_network.yaml) -- The [pubsub template](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/tree/master/dm/templates/pubsub) supports subscription expiration -- Github PRs are now automatically [triggering](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/pull/312) CloudBuild based tests - -### 03.09.2019 - - **cft-dm-dev branch merged to master** - -#### Major updates which may break your current deployments: -- Switching legacy types to 'gcp-types' -- Switching to the latest API where it is possible -- Adding unique hash to resource names in case of iteration - - This is fixing many issues with iterations but it is a breaking change - -#### Non-breaking changes: -- Adding versioning for the templates -- Adding new properties for templates, schemas where it's applicable -- Adding support for Labels for every resource where it's possible -- Adding cross project resource creation where it is possible -- Locking down schemas: - - Tight check on invalid properties to catch typos instead of ignoring them - - Tight check on combination of properties. ( For example a project can't be - a host and a guest (VPC) project at the same time.) 
- -#### CI improvements: -- Our CI environment is running tests on the current master and dev branch - - Running schema validation checks on the example yamls where it's applicable - - Running integration tests on all the templates -- CloudBuild containers and jobs running the tests in a test organization - - Currently working on local container based testing with local source code - -### 23.08.2019 - -- Adding container images for test automation -- Finalizing 'cft-dm-dev' branch for merge to master - -### 21.03.2019 - -- *Templates/iam_member*: The template is now using virtual.projects.iamMemberBinding which is and advanced -endpoint to manage IAM membership. This is a fix for concurrent IAM changes error. - - This change should be 100% backwards compatible - - This template should solve concurrency error with built in retries - - *Templates/project*: This template had concurrent IAM changes error. This update utilizes the iam_member - CFT template, which is referenced in the project.py.schema file. No more concurancy error! - -### 20.03.2019 - - - *Example Solutions*: The first exmaple demonstrate how to use Wrapper templates. - - *Specific wrapper* template to modify the behaviour of an external template such as a CFT template - - *Generic Wrapper* template to inject configuration for every template regardless of it's behaviour. - -### 19.03.2019 - - - *CloudDNS*: Changed CloudDNS Record set from actions to use gcp-types which gives native support for the API. - -## CFT CLI - -### 0.0.4 - -- Feature: Cross deployment refference support output lookup of complex DM resources - -### 0.0.3 - -- Feature: Cross deployment refference support complex outputs such as hashmap, list and their combination. 
- -### 0.0.2 - -- Initial version diff --git a/dm/CI/cft_base_container/Dockerfile b/dm/CI/cft_base_container/Dockerfile deleted file mode 100644 index c6349a9c8fa..00000000000 --- a/dm/CI/cft_base_container/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM gcr.io/google.com/cloudsdktool/cloud-sdk -# gcr.io/cloud-builders/gcloud - -RUN set -ex && apt-get update && apt-get -y install make \ - && apt-get -y install gettext-base \ - && python3 --version \ - && pip3 install --upgrade pip \ - && pip3 --version \ - && pip3 install setuptools \ - && git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit \ - && cd cloud-foundation-toolkit/dm \ - && rm -rf templates \ - && pip3 install tox \ - && pip3 install pytest wheel \ - && make build \ - && make install \ - && make cft-venv \ - && make template-prerequisites \ - && make cft-prerequisites \ - && . venv/bin/activate \ - && ./src/cftenv \ - && pwd \ - && cft --version \ - && bats -v \ - && which bats - -WORKDIR /cloud-foundation-toolkit/dm diff --git a/dm/CI/cft_base_container/cloudbuild.yaml b/dm/CI/cft_base_container/cloudbuild.yaml deleted file mode 100644 index 521a0005c04..00000000000 --- a/dm/CI/cft_base_container/cloudbuild.yaml +++ /dev/null @@ -1,13 +0,0 @@ -steps: -- name: 'gcr.io/cloud-builders/docker' - args: ['build', '-t', 'gcr.io/$PROJECT_ID/cft:${_CFT_VERSION}', - '-t', 'gcr.io/$PROJECT_ID/cft', - '--build-arg', 'CFT_VERSION=${_CFT_VERSION}', - '.'] -substitutions: - _CFT_VERSION: 0.0.4 - -images: -- 'gcr.io/$PROJECT_ID/cft:latest' -- 'gcr.io/$PROJECT_ID/cft:$_CFT_VERSION' -tags: ['cft-test-dm'] diff --git a/dm/CI/cft_base_contianer/Dockerfile b/dm/CI/cft_base_contianer/Dockerfile deleted file mode 100644 index 6988d7270a2..00000000000 --- a/dm/CI/cft_base_contianer/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM gcr.io/cloud-builders/gcloud - - - -RUN set -ex && apt-get update && apt-get -y install make \ - && apt-get -y install gettext-base \ - && pip install --upgrade pip \ - && pip 
install setuptools \ - && git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit \ - && cd cloud-foundation-toolkit/dm \ - && rm -rf templates \ - && pip install tox \ - && pip install pytest wheel \ - && make build \ - && make install \ - && make cft-venv \ - && make template-prerequisites \ - && make cft-prerequisites \ - && . venv/bin/activate \ - && ./src/cftenv \ - && pwd \ - && cft --version \ - && bats -v \ - && which bats - - -WORKDIR /cloud-foundation-toolkit/dm diff --git a/dm/CI/cft_base_contianer/cft_base_container b/dm/CI/cft_base_contianer/cft_base_container deleted file mode 120000 index 4760512fe10..00000000000 --- a/dm/CI/cft_base_contianer/cft_base_container +++ /dev/null @@ -1 +0,0 @@ -cft_base_container \ No newline at end of file diff --git a/dm/CI/cft_base_contianer/cloudbuild.yaml b/dm/CI/cft_base_contianer/cloudbuild.yaml deleted file mode 100644 index 521a0005c04..00000000000 --- a/dm/CI/cft_base_contianer/cloudbuild.yaml +++ /dev/null @@ -1,13 +0,0 @@ -steps: -- name: 'gcr.io/cloud-builders/docker' - args: ['build', '-t', 'gcr.io/$PROJECT_ID/cft:${_CFT_VERSION}', - '-t', 'gcr.io/$PROJECT_ID/cft', - '--build-arg', 'CFT_VERSION=${_CFT_VERSION}', - '.'] -substitutions: - _CFT_VERSION: 0.0.4 - -images: -- 'gcr.io/$PROJECT_ID/cft:latest' -- 'gcr.io/$PROJECT_ID/cft:$_CFT_VERSION' -tags: ['cft-test-dm'] diff --git a/dm/CI/cft_schema_runner/Dockerfile b/dm/CI/cft_schema_runner/Dockerfile deleted file mode 100644 index 1053fabc63f..00000000000 --- a/dm/CI/cft_schema_runner/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM gcr.io/cloud-builders/gcloud - -RUN apt-get update -RUN apt-get install python-setuptools -y -RUN apt-get install npm -y -RUN apt-get install jq -y -RUN pip install yq -RUN npm install -g ajv-cli -RUN ln -s /usr/bin/nodejs /usr/bin/node - -COPY docker-entrypoint.sh /root/ -RUN chmod 777 /root/docker-entrypoint.sh - -ENTRYPOINT ["/root/docker-entrypoint.sh"] - -CMD [] diff --git 
a/dm/CI/cft_schema_runner/cloudbuild-test.yaml b/dm/CI/cft_schema_runner/cloudbuild-test.yaml deleted file mode 100644 index 362cd929115..00000000000 --- a/dm/CI/cft_schema_runner/cloudbuild-test.yaml +++ /dev/null @@ -1,177 +0,0 @@ -# find . -name "*.yam"l | grep examples| sort -n | awk '{print "- name: 'gcr.io/\$PROJECT_ID/cft-schema'\n args: [`"$1"`]"}' | sed "s/\`/'/g" - -steps: -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/autoscaler/examples/autoscaler_regional.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/autoscaler/examples/autoscaler_zonal.yaml'] -# bug -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/backend_service/examples/backend_service_global.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/backend_service/examples/backend_service_regional.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/bastion/examples/bastion.yaml'] -# Skip, complex example -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/bigquery/examples/bigquery.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/cloudbuild/examples/cloudbuild_reposource.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/cloudbuild/examples/cloudbuild_storagesource.yaml'] -# FAILING -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/cloudbuild/examples/cloudbuild_trigger.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/cloudbuild/examples/cloudbuild.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/cloud_function/examples/cloud_function_upload.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/cloud_function/examples/cloud_function.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/cloud_router/examples/cloud_router.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/cloud_spanner/examples/cloud_spanner.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: 
['./templates/cloud_sql/examples/cloud_sql_read_replica.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/cloud_sql/examples/cloud_sql.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/cloud_tasks/examples/cloud_tasks_queue.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/cloud_tasks/examples/cloud_tasks_task.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/dataproc/examples/dataproc.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/dns_managed_zone/examples/dns_managed_zone_legacy.yaml'] -# FIXME -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/dns_managed_zone/examples/dns_managed_zone_private_visibility_config.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/dns_managed_zone/examples/dns_managed_zone_private.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/dns_managed_zone/examples/dns_managed_zone_public.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/dns_managed_zone/examples/dns_managed_zone.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/dns_records/examples/dns_records.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/external_load_balancer/examples/external_load_balancer_https.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/external_load_balancer/examples/external_load_balancer_http.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/external_load_balancer/examples/external_load_balancer_ssl.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/external_load_balancer/examples/external_load_balancer_tcp.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/firewall/examples/firewall.yaml'] -# FIXME -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/folder/examples/folder.yaml'] -# FIXME -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/forseti/examples/forseti.yaml'] 
-# FIXME -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/forwarding_rule/examples/forwarding_rule_global.yaml'] -# FIXME -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/forwarding_rule/examples/forwarding_rule_regional.yaml'] -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/gcs_bucket/examples/gcs_bucket_iam_bindings.yaml'] -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/gcs_bucket/examples/gcs_bucket_lifecycle.yaml'] -# SCHEMA version issue -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/gcs_bucket/examples/gcs_bucket.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/gke/examples/gke_regional_private.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/gke/examples/gke_regional.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/gke/examples/gke_zonal.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/haproxy/examples/haproxy.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/healthcheck/examples/healthcheck.yaml'] -# FIXME -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/iam_custom_role/examples/iam_custom_role.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/iam_member/examples/iam_member.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/instance/examples/instance.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/instance_template/examples/instance_template.yaml'] -# Schema faulty? -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/interconnect_attachment/examples/interconnect_attachment_dedicated.yaml'] -# Schema faulty? 
-# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/interconnect_attachment/examples/interconnect_attachment_partner.yaml'] -# - name: gcr.io/$PROJECT_ID/cft-schema -# FIXME -# args: ['./templates/interconnect/examples/interconnect_dedicated.yaml'] -# FIXME -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/internal_load_balancer/examples/internal_load_balancer.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/ip_reservation/examples/ip_reservation.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/kms/examples/kms_signkey.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/kms/examples/kms.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/logsink/examples/billingaccount_logsink_bucket_destination.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/logsink/examples/folder_logsink_bq_destination.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/logsink/examples/org_logsink_pubsub_destination.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/logsink/examples/project_logsink_bucket_destination.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/managed_instance_group/examples/managed_instance_group_healthcheck.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/managed_instance_group/examples/managed_instance_group.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/nat_gateway/examples/nat_gateway.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/network/examples/network.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/network_peering/examples/network_peering.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/org_policy/examples/org_policy.yaml'] -# FIXME -# - name: gcr.io/$PROJECT_ID/cft-schema -# args: ['./templates/project/examples/project.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: 
['./templates/pubsub/examples/pubsub_push.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/pubsub/examples/pubsub.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/route/examples/route.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/runtime_config/examples/runtime_config.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_bindings.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_legacy.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_policy.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/ssl_certificate/examples/ssl_certificate.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/stackdriver_metric_descriptor/examples/stackdriver_metric_descriptor.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/target_proxy/examples/target_proxy_https.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/target_proxy/examples/target_proxy_http.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/target_proxy/examples/target_proxy_ssl.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/target_proxy/examples/target_proxy_tcp.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/url_map/examples/url_map.yaml'] -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/vpn/examples/vpn.yaml'] -tags: ['cft-dm-schema-runner'] diff --git a/dm/CI/cft_schema_runner/cloudbuild.yaml b/dm/CI/cft_schema_runner/cloudbuild.yaml deleted file mode 100644 index d967a6435bb..00000000000 --- a/dm/CI/cft_schema_runner/cloudbuild.yaml +++ /dev/null @@ -1,13 +0,0 @@ -steps: -- name: 'gcr.io/cloud-builders/docker' - args: ['build', '-t', 'gcr.io/$PROJECT_ID/cft-schema:${_CFT_VERSION}', - '-t', 'gcr.io/$PROJECT_ID/cft-schema', 
- '--build-arg', 'CFT_VERSION=${_CFT_VERSION}', - '.'] -substitutions: - _CFT_VERSION: 0.0.4 - -images: -- 'gcr.io/$PROJECT_ID/cft-schema:latest' -- 'gcr.io/$PROJECT_ID/cft-schema:$_CFT_VERSION' -tags: ['cft-test-dm'] diff --git a/dm/CI/cft_schema_runner/docker-entrypoint.sh b/dm/CI/cft_schema_runner/docker-entrypoint.sh deleted file mode 100644 index 8692d98e2ce..00000000000 --- a/dm/CI/cft_schema_runner/docker-entrypoint.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -set -eu - -readonly GIT_URL='https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit' -readonly CLONE_DIRNAME="$(mktemp -d)" -readonly BRANCH_NAME="cft-dm-dev" - -readonly COLOR_RESET='\033[0m' -readonly COLOR_BOLD='\033[1m' -readonly COLOR_BG_BLUE='\033[44m' - -echo_color() { - echo -e "${COLOR_BOLD}${COLOR_BG_BLUE}$@${COLOR_RESET}" -} - -echo_color "Cloning repo" -git clone "${GIT_URL}" "${CLONE_DIRNAME}" -cd "${CLONE_DIRNAME}" -git checkout "${BRANCH_NAME}" - -echo_color 'Initializing CFT DM templates' - -cd dm/templates - -# cat healthcheck/examples/healthcheck.yaml | yq .resources[0].properties > project.json; cat healthcheck/healthcheck.py.schema | yq . > project.py.schema.json; ajv validate -s project.py.schema.json -d project.json - -EXAMPLE_COUNT=`cat $@ | yq '.resources | length'` -EXAMPLE_COUNT=$(($EXAMPLE_COUNT-1)) - -while [ $EXAMPLE_COUNT -ge 0 ]; -do - echo_color "Example $EXAMPLE_COUNT" - cat $@ | yq .resources[$EXAMPLE_COUNT].properties > example.json; - cat example.json - export SCHEMA_PATH=`cat $@ | yq -r .imports[0].path | awk '{print $1".schema"}'` - echo_color $SCHEMA_PATH - cat $SCHEMA_PATH | yq . 
> example.py.schema.json; - echo_color "Schema validation" - ajv validate -s example.py.schema.json -d example.json - EXAMPLE_COUNT=$(($EXAMPLE_COUNT-1)) - -done diff --git a/dm/CI/cft_test_runner/Dockerfile b/dm/CI/cft_test_runner/Dockerfile deleted file mode 100644 index fc4044f120c..00000000000 --- a/dm/CI/cft_test_runner/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM gcr.io/cft-test-workspace-221111/cft:latest - -COPY cloud-foundation-tests.conf /etc/cloud-foundation-tests.conf -RUN cat /etc/cloud-foundation-tests.conf -RUN chmod 666 /etc/cloud-foundation-tests.conf -COPY docker-entrypoint.sh /root/ -RUN chmod 777 /root/docker-entrypoint.sh - - -WORKDIR /cloud-foundation-toolkit/dm - -ENTRYPOINT ["/root/docker-entrypoint.sh"] - -CMD [] diff --git a/dm/CI/cft_test_runner/cloud-foundation-tests.conf b/dm/CI/cft_test_runner/cloud-foundation-tests.conf deleted file mode 100644 index 9ecd0be0ae6..00000000000 --- a/dm/CI/cft_test_runner/cloud-foundation-tests.conf +++ /dev/null @@ -1,5 +0,0 @@ -export CLOUD_FOUNDATION_ORGANIZATION_ID="12345678 -export CLOUD_FOUNDATION_PROJECT_ID="project_ID" -export CLOUDDNS_CROSS_PROJECT_ID="project_ID2" -export CLOUD_FOUNDATION_BILLING_ACCOUNT_ID="123456-789ABCD-000111" -export CLOUD_FOUNDATION_USER_ACCOUNT="user@cft.tips" diff --git a/dm/CI/cft_test_runner/cloudbuild-test.yaml b/dm/CI/cft_test_runner/cloudbuild-test.yaml deleted file mode 100644 index b5a13eea485..00000000000 --- a/dm/CI/cft_test_runner/cloudbuild-test.yaml +++ /dev/null @@ -1,10 +0,0 @@ -steps: -- name: 'gcr.io/$PROJECT_ID/cft-ci-test' - args: ['${_BATS_TEST_FILE}', '${_BRANCH_NAME}'] - -substitutions: - _BATS_TEST_FILE: ./templates/healthcheck/tests/integration/healthcheck.bats # default value - _BRANCH_NAME: master # default value - -tags: ['cft-dm-test-runner'] -timeout: '7200s' diff --git a/dm/CI/cft_test_runner/cloudbuild.yaml b/dm/CI/cft_test_runner/cloudbuild.yaml deleted file mode 100644 index 85861c89e58..00000000000 --- 
a/dm/CI/cft_test_runner/cloudbuild.yaml +++ /dev/null @@ -1,13 +0,0 @@ -steps: -- name: 'gcr.io/cloud-builders/docker' - args: ['build', '-t', 'gcr.io/$PROJECT_ID/cft-ci-test:${_CFT_VERSION}', - '-t', 'gcr.io/$PROJECT_ID/cft-ci-test', - '--build-arg', 'CFT_VERSION=${_CFT_VERSION}', - '.'] -substitutions: - _CFT_VERSION: 0.0.4 - -images: -- 'gcr.io/$PROJECT_ID/cft-ci-test:latest' -- 'gcr.io/$PROJECT_ID/cft-ci-test:$_CFT_VERSION' -tags: ['cft-test-dm'] diff --git a/dm/CI/cft_test_runner/docker-entrypoint.sh b/dm/CI/cft_test_runner/docker-entrypoint.sh deleted file mode 100644 index 37e4fd7e0b5..00000000000 --- a/dm/CI/cft_test_runner/docker-entrypoint.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash -set -eu - -readonly GIT_URL='https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit' -readonly CLONE_DIRNAME="/workspace" -readonly DM_root="/cloud-foundation-toolkit/dm" - -readonly COLOR_RESET='\033[0m' -readonly COLOR_BOLD='\033[1m' -readonly COLOR_BG_BLUE='\033[44m' - -echo_color() { - echo -e "${COLOR_BOLD}${COLOR_BG_BLUE}$@${COLOR_RESET}" -} - -echo_color 'Activating venv for testing' - -cd "${DM_root}" - -set +u # Turn off because virtualenv uses undefined variables -. venv/bin/activate \ -./src/cftenv -set -u - -export CLOUD_FOUNDATION_CONF=/etc/cloud-foundation-tests.conf - -if [ -d "/workspace/dm" ] -then - echo_color "/workspace/dm exists, no need to clone repo" -else - echo_color "/workspace/dm is missing, cloning repo" - readonly BRANCH_NAME=$2 - git clone "${GIT_URL}" "${CLONE_DIRNAME}" - cd "${CLONE_DIRNAME}" - git checkout "${BRANCH_NAME}" -fi - -mv "/workspace/dm/templates" "${DM_root}" - -echo_color "Welcome your Majesty, ready to run some tests!" 
- -# Running bats tests relative to dm folder for example: "./templates/project/tests/integration/project.bats" - -cd "${DM_root}" - -chmod 777 $1 -exec bats $1 diff --git a/dm/CI/triggers/cft-triggers.py b/dm/CI/triggers/cft-triggers.py deleted file mode 100644 index 9a34f14cd21..00000000000 --- a/dm/CI/triggers/cft-triggers.py +++ /dev/null @@ -1,32 +0,0 @@ -# This is a wrapper tamplete to crawl through the dm/templates folder and -# create a trigger for each. -# This is not a generic template, used for CFT GitHub PR testing - -import copy - - -def generate_config(context): - - tests = [] - resources = [] - for test in context.imports: - if '/tests/integration/' in test: - testData = test.split('/') - testFolder = testData[3] - batsFile = testData[6] - - props = copy.deepcopy(context.properties) - props['description'] = props['description'].replace('#template#', batsFile[:-5]) - props['substitutions']['_BATS_TEST_FILE'] = \ - props['substitutions']['_BATS_TEST_FILE'].replace( - '#template#', testFolder).replace( - '#templatetest#', batsFile) - for i in range(len(props['includedFiles'])): - props['includedFiles'][i] = props['includedFiles'][i].replace( - '#template#', testFolder) - resources.append({ - 'type': "cft-trigger.py", - 'name': context.env['name'] + "-" + batsFile[:-5], - 'properties': props}) - - return {'resources': resources} diff --git a/dm/CI/triggers/cft-triggers.py.schema b/dm/CI/triggers/cft-triggers.py.schema deleted file mode 100644 index cc9b8fada72..00000000000 --- a/dm/CI/triggers/cft-triggers.py.schema +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Cloud Build Triggers for CI - author: Adam Ocsvari - description: | - Supports creation of an automated Cloud Build trigger. - -imports: - - path: ../../templates/cloudbuild/trigger.py - name: cft-trigger.py diff --git a/dm/CI/triggers/cloudbuild_trigger_cft.yaml b/dm/CI/triggers/cloudbuild_trigger_cft.yaml deleted file mode 100644 index 8702abcf160..00000000000 --- a/dm/CI/triggers/cloudbuild_trigger_cft.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# This config+template creates a Cloud build trigger for every CFT DM template -# by crawling through the repository. For this crawl we use glob import which -# needs to be enabled: -# -# gcloud config set deployment_manager/glob_imports True -# -# Currently the repo is hardcoded. - -imports: - - path: cft-triggers.py - name: cft-triggers.py -# Using the gcloud glob import to load one file from every template where -# triggers should be created. This is inderictly passing the list of possible -# templates to DM without hardcoding that into a config list. 
- - path: ../../templates/**/tests/integration/*.bats - -resources: -# - name: cft-github-dev -# type: cft-triggers.py -# properties: -# description: CFT-DM-#template# PR trigger [cft-dm-dev] -# disabled: False -# github: -# name: cloud-foundation-toolkit -# owner: GoogleCloudPlatform -# pullRequest: -# branch: cft-dm-dev -# commentControl: COMMENTS_ENABLED -# filename: dm/CI/cft_test_runner/cloudbuild-test.yaml -# substitutions: -# _BATS_TEST_FILE: ./templates/#template#/tests/integration/#templatetest# -# includedFiles: -# - dm/templates/#template#/*.py -# - dm/templates/#template#/*.schema -# - dm/templates/#template#/tests/integration/* - - name: cft-github-master - type: cft-triggers.py - properties: - description: CFT-DM-#template# PR trigger [Master] - disabled: False - github: - name: cloud-foundation-toolkit - owner: GoogleCloudPlatform - pullRequest: - branch: master - commentControl: COMMENTS_ENABLED - filename: dm/CI/cft_test_runner/cloudbuild-test.yaml - substitutions: - _BATS_TEST_FILE: ./templates/#template#/tests/integration/#template#.bats - includedFiles: - - dm/templates/#template#/*.py - - dm/templates/#template#/*.schema - - dm/templates/#template#/tests/integration/* \ No newline at end of file diff --git a/dm/MANIFEST.in b/dm/MANIFEST.in deleted file mode 100644 index ddf938cd250..00000000000 --- a/dm/MANIFEST.in +++ /dev/null @@ -1 +0,0 @@ -include VERSION requirements/install.txt requirements/development.txt diff --git a/dm/Makefile b/dm/Makefile deleted file mode 100644 index 0ba73228293..00000000000 --- a/dm/Makefile +++ /dev/null @@ -1,70 +0,0 @@ -PYTHON ?= python3 -SHELL := /bin/bash -PACKAGE = cloud_foundation_toolkit - -ifndef CLOUD_FOUNDATION_CONF -override CLOUD_FOUNDATION_CONF = ~/.cloud-foundation-tests.conf -endif - -help: - @echo "cft-prerequisites Installs prerequisites for CFT python utility development" - @echo "cft-venv Creates the virtual environment called 'venv' for development" - @echo "cft-clean-venv Deletes the 
development environment" - @echo "cft-test Runs all unit tests from outside of the venv (for CI tools)" - @echo "cft-test-venv Runs all unit tests from withing the venv (for active development)" - @echo "template-prerequisites Install prerequisites for template development" - @echo "build Builds the package" - @echo "install Installs the package system wide" - @echo "uninstall Uninstalls the package" - @echo "clean Cleanup build/test/cache files" - - -.ONESHELL: -.PHONY: cft-prerequisites cft-venv cft-clean-venv cft-test cft-test-venv template-prerequisites cft-build-base-image - -cft-prerequisites: - ${PYTHON} -m pip install -r requirements/prerequisites.txt - -cft-venv: - ${PYTHON} -m tox -e venv - -cft-clean-venv: - rm -rf venv - -cft-test: - ${PYTHON} -m tox -- -x tests/unit - -cft-test-templates: - ${PYTHON} -m tox -- -x tests/templates - -cft-test-venv: - ${PYTHON} -m pytest -v - -cft-build-base-image: - docker build -f CI/cft_base_container/Dockerfile -t cft_base_image CI/cft_base_container/ - -cft-test-bats: - docker run -it --rm \ - -v `pwd`:/workspace \ - --entrypoint "/bin/bash" \ - -v ~/.config/:/root/.config \ - -v $(CLOUD_FOUNDATION_CONF):/root/.cloud-foundation-tests.conf \ - cft_base_image \ - -c "cd /workspace && /cloud-foundation-toolkit/dm/venv/bin/bats /workspace/$(TEST)" - -template-prerequisites: - rm -rf bats && git clone https://github.com/sstephenson/bats.git && ./bats/install.sh venv && rm -rf bats - -build: - ${PYTHON} setup.py sdist bdist_wheel - -install: - ${PYTHON} -m pip install dist/${PACKAGE}-$$(cat VERSION)-py2.py3-none-any.whl - -uninstall: - ${PYTHON} -m pip uninstall ${PACKAGE} -y - -clean: - find src/${PACKAGE} \( -path '*__pycache__/*' -o -name __pycache__ \) -delete - find tests \( -path '*__pycache__/*' -o -name __pycache__ \) -delete - rm -rf build dist *.egg-info .cache .eggs .coverage diff --git a/dm/README.md b/dm/README.md deleted file mode 100644 index 548e0e4f6c7..00000000000 --- a/dm/README.md +++ /dev/null @@ -1,28 
+0,0 @@ -# Dear CFT User! - -If you are looking to build new GCP infrastructure, we recommend that you use [Terraform CFT modules](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/docs/terraform.md) -Terraform CFT supports the most recent GCP resources, reflects GCP best practices can be used off-the-shelf to quickly build a repeatable enterprise-ready foundation. -Additionally, if you are a looking to manage your GCP resources through Kubernetes, consider using [Config Connector CFT solutions](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/tree/master/config-connector/solutions). - -# Cloud Foundation Toolkit Project - -## Overview - -The Cloud Foundation toolkit (henceforth, CFT) includes the following parts: - -- A comprehensive set of production-ready resource templates that follow - Google's best practices, which can be used with the CFT or the gcloud - utility (part of the Google Cloud SDK) - see - [the template directory](templates/README.md) -- A command-line interface (henceforth, CLI) that deploys resources defined in - single or multiple CFT-compliant config files - see: - - The CFT source Python files (the `src/` directory) - - The [CFT User Guide](docs/userguide.md) - -In addition, the CFT repository includes a sample pipeline that enables running -CFT deployment operations from Jenkins - see the -[pipeline directory](pipeline/README.md). - -## License - -Apache 2.0 - See [LICENSE](LICENSE) for more information. 
diff --git a/dm/VERSION b/dm/VERSION deleted file mode 100644 index 81340c7e72d..00000000000 --- a/dm/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.0.4 diff --git a/dm/docs/template_dev_guide.md b/dm/docs/template_dev_guide.md deleted file mode 100644 index 05ae3ba3333..00000000000 --- a/dm/docs/template_dev_guide.md +++ /dev/null @@ -1,162 +0,0 @@ -# Template Developer Guide - - - -- [Overview](#overview) -- [Prerequisites](#prerequisites) -- [Testing](#testing) - - [Bats Installation](#bats-installation) - - [Testing Environment Setup](#testing-environment-setup) - - [Using the Cloud Foundation Config File](#using-the-cloud-foundation-config-file) - - [Using environment variables](#using-environment-variables) - - [Running Tests](#running-tests) - - [Temporary Files and Fixtures](#temporary-files-and-fixtures) - - - -## Overview - -The Cloud Foundation toolkit (henceforth, CFT) includes the following parts: - -- A comprehensive set of [production-ready resource templates](../templates/README.md) - that follow Google's best practices, which can be used with the CFT or the - gcloud utility (part of the Google Cloud SDK) -- A command-line interface (henceforth, CLI) that deploys resources defined in - single or multiple CFT-compliant config files - see the - [CFT User Guide](userguide.md) - -This Guide is intended for the developers who are planning to modify the -existing templates or create new ones. - -## Prerequisites - -1. Install and set up the [Google Cloud SDK](https://cloud.google.com/sdk/). -2. Install the template development prerequisites: - -```shell -make template-prerequisites -``` - -## Testing - -The template consistency and quality control in this project are backed by -simple integration tests using the -[Bats testing framework](https://github.com/sstephenson/bats). - -### Bats Installation - -To install Bats: - -1. Follow the instructions on the Bats - [website](https://github.com/sstephenson/bats). -2. Make sure the `bats` executable is in your PATH. 
-3. Alternatively, set up a *development environment* as described in the - [CFT Developer Guide](tool_dev_guide.md). - -### Testing Environment Setup - -#### Using the Cloud Foundation Config File - - -To run tests, you need to modify the organization, project, and -account-specific values in the configuration file. Proceed as follows: - -1. Copy `tests/cloud-foundation-tests.conf.example` to - `~/.cloud-foundation-tests.conf`. -2. Change the values as required. - -`Note:` You can modify the configuration file path by changing the -CLOUD_FOUNDATION_CONF environment variable. For example: - -```shell -export CLOUD_FOUNDATION_CONF=/etc/cloud-foundation-tests.conf -``` - -You need to enter the site-specific information (for yourself or for your -organization) in the test config file. See, for example, -`tests/cloud-foundation-tests.conf.example`. - -#### Using environment variables - -An alternative to using the Cloud Foundation config file is to use environment -variables. Make sure to export all variables described in the -`tests/cloud-foundation-tests.conf.example` file, with your organization-specific -changes. - -### Running Tests - -`Note:` Currently, only one test file can be executed at a time. - -Always run the test from the root of the `cloud-foundation` project: - -```shell -./templates/network/tests/integration/network.bats - ✓ Creating deployment my-gcp-project-network from my-gcp-project-network.yaml - ✓ Verifying resources were created in deployment my-gcp-project-network - ✓ Verifying subnets were created in deployment my-gcp-project-network - ✓ Deployment Delete - ✓ Verifying resources were deleted in deployment my-gcp-project-network - ✓ Verifying subnets were deleted in deployment my-gcp-project-network -``` - -For the sake of consistency, keep the test files similar, as much as possible, -to the *example configs* available in each template's `examples/` directory. 
- - -### Running Bats tests with docker image - -#### Prepare environment - -Authenticate your local gcloud tool with your personal user account https://cloud.google.com/sdk/gcloud/reference/auth/login or -using service account json https://cloud.google.com/sdk/gcloud/reference/auth/activate-service-account - -Create test config file by following [Using the Cloud Foundation Config File](#using-the-cloud-foundation-config-file) - -Build test dcocker image: - - cd cloud-foundation-toolkit/dm - make cft-build-base-image - -#### Run test - -To run templates/instance/tests/integration/instance.bats file, run: - - make cft-test-bats TEST=templates/instance/tests/integration/instance.bats - -Or, if you have config file not in ~/.cloud-foundation-tests.conf: - - make cft-test-bats TEST=templates/instance/tests/integration/instance.bats CLOUD_FOUNDATION_CONF=/conf/file/location.json - - -#### Unit Tests - - -This testing mode is typically used when running tests from a CI tool. - -Use `tox` to create the necessary virtual environment and run tests: - -```shell -make cft-test-templates -``` - -### Temporary Files and Fixtures - -When running tests, temporary Deployment Manager configs and fixtures -are often created and deleted by the *teardown()* function. - -Due to the fact that a DM config file must be located relative to the -template(s) it uses, the configs are usually created in the root of the -project. For example, in the `network` template, the config -`.${CLOUD_FOUNDATION_PROJECT_ID}-network.yaml` will be temporarily created -(and deleted at the end of the execution). - -Other temporary files are created under `/tmp`; for example: - -```shell -/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-network.txt -/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-project.txt -``` - -The names of the "artifacts" could change. However, if a problem is observed -during the test execution, the root and the /tmp directory are good places to -look for hints about what had caused the problem. 
diff --git a/dm/docs/tool_dev_guide.md b/dm/docs/tool_dev_guide.md deleted file mode 100644 index e3072c155f2..00000000000 --- a/dm/docs/tool_dev_guide.md +++ /dev/null @@ -1,155 +0,0 @@ -# CFT Developer Guide - - - -- [Overview](#overview) -- [Prerequisites](#prerequisites) - - [Google Cloud SDK](#google-cloud-sdk) - - [Development Environment](#development-environment) -- [Unit Tests](#unit-tests) - - [From Outside the Development Environment](#from-outside-the-development-environment) - - [From Within the Development Environment](#from-within-the-development-environment) - - - -## Overview - -The Cloud Foundation toolkit (henceforth, CFT) includes the following parts: -project is comprised of two parts: - -- A comprehensive set of [production-ready resource templates](../templates/README.md) - that follow Google's best practices, which can be used with the CFT or the - gcloud utility (part of the Google Cloud SDK) -- A command-line interface (henceforth, CLI) that deploys resources defined in - single or multiple CFT-compliant config files - see the - [CFT User Guide](userguide.md) - -This Guide is intended for the developers who are planning to modify and/or -programmatically interface with the CFT. - -## Prerequisites - -### Google Cloud SDK - -1. Install the [Google Cloud SDK](https://cloud.google.com/sdk/), which - includes the `gcloud` CLI. - -Because the SDK is not in *pypi*, its installation cannot be easily -automated from within this project, due to the fact that users on the different -platforms need the different packages. Follow the SDK installation instructions -for your platform. - -2. Ensure that the `gcloud` CLI is in your user PATH (because the CFT uses - this CLI to find the location of the Python libraries included in the SDK). 
- -The `gcloud` CLI is usually placed in the PATH automatically when you: - -- Install the SDK via the official package manager for your OS (RPM, DEB, - etc.), or -- Use the installer (`install.sh`) bundled in a Linux tarball - -However, if you used neither of the above installation methods, you need to -ensure that `gcloud` can be found in one of the directories specified by the -PATH environment variable. - -### Development Environment - -The CFT development environment is based on: - -- [Tox](https://tox.readthedocs.io/en/latest/index.html) for streamlined - management of Python virtual environments -- [pytest](https://docs.pytest.org/en/latest/contents.html) for unit tests - -Proceed as follows: - -1. Install Tox with the system Python. -2. Install CFT prerequisites: - -```shell -sudo make cft-prerequisites -``` - -The CFT development is carried out in a virtual environment. - -3. Create the virtual development environment called `venv` with `tox` in - the root of the project directory: - -```shell -make cft-venv -``` - -4. Activate the virtual environment: - -```shell -source venv/bin/activate -source src/cftenv -``` - -The above activates the virtual environment, then finds the Google SDK path -and adds libraries to PYTHONPATH. These cannot be simply added to the -`Makefile` because `make` creates sanitized sub-shells for each command, and -the parent shell does not get the environment variables that the virtual -environment sets up on activation. - -`Note:` The `tox.ini` file in this project is configured to -"*install*" the utility using pip's "develop" mode, i.e., the pip **does not** -actually package and install the utility in the virtual environment's -`site-packages`. - -5. 
To install or update any of the packages in your virtual environment - (created by `tox`), delete and re-create the environment: - -- *Deactivate* the virtual environment (if it has been activated): - -```shell -deactivate -unset CLOUDSDK_ROOT_DIR CLOUDSDK_PYTHON_SITEPACKAGES PYTHONPATH -``` - -- Delete the deactivated virtual environment: - -```shell -make cft-clean-venv -``` - -- Create the environment as described in Step 3 above. - -## Unit Tests - -You can run the CFT unit tests either from withing your development -environment or from outside of it. - -### From Outside the Development Environment - -This testing mode is typically used when running tests from a CI tool. - -1. Use `tox` to create the necessary virtual environments (not `venv`, which - is used only for active development): - -```shell -make cft-test -``` - -2. Run all the tests within the "test" virtual environments. - -### From Within the Development Environment - -This testing mode is typically used while actively developing within the -development virtual environment. - -1. Activate the `venv` environment as shown in Step 4 of the - [Development Environment](#development-environment) section. -2. Source `src/cftdev` to get PYTHONPATH set as shown in Step 5 of the - [Development Environment](#development-environment) section. -3. 
Run tests as follows: - -```shell -# use the make target to run all tests: -make cft-test-venv - -# alternatively, use pytest directly to run all tests: -python -m pytest -v - -# alternatively, run a single test file: -python -m pytest -v tests/unit/test_deployment.py -``` \ No newline at end of file diff --git a/dm/docs/userguide.md b/dm/docs/userguide.md deleted file mode 100644 index 966cc5db81b..00000000000 --- a/dm/docs/userguide.md +++ /dev/null @@ -1,728 +0,0 @@ - -# Cloud Foundation Toolkit - User Guide - - - -- [Overview](#overview) -- [CFT Configs](#cft-configs) - - [Extra YAML Directives](#extra-yaml-directives) - - [name](#name) - - [project](#project) - - [description](#description) - - [Extra Features](#extra-features) - - [Cross-deployment References with the `$(out)` Tag](#cross-deployment-references-with-the-out-tag) - - [Jinja Templating](#jinja-templating) - - [Samples](#samples) - - [network.yaml](#networkyaml) - - [firewall.yaml](#firewallyaml) - - [instance.yaml](#instanceyaml) -- [Templates](#templates) -- [Toolkit Installation and Configuration](#toolkit-installation-and-configuration) - - [Installing Prerequisites](#installing-prerequisites) - - [Python 2.7 + pip](#python-27--pip) - - [Google Cloud SDK](#google-cloud-sdk) - - [Getting the CFT Code](#getting-the-cft-code) - - [Installing the CFT](#installing-the-cft) - - [Uninstalling the CFT](#uninstalling-the-cft) - - [Updating the CFT](#updating-the-cft) -- [CLI Usage](#cli-usage) - - [Syntax](#syntax) - - [Actions](#actions) - - [The "create" Action](#the-create-action) - - [The "update" Action](#the-update-action) - - [The "apply" Action](#the-apply-action) - - [The "delete" Action](#the-delete-action) - - - -## Overview - -The GCP Deployment Manager service does not support cross-deployment -references, and the `gcloud` utility does not support concurrent deployment of -multiple inter-dependent configs. 
The `Cloud Foundation toolkit` (henceforth, -`CFT`) expands the capabilities of Deployment Manager and `gcloud` to support -the following scenarios: - -- Creation, update, and deletion of multiple deployments in a single operation - which: - - Accepts multiple config files as input - - Automatically resolves dependencies between these configs - - Creates/updates deployments in the dependency-stipulated order, or - deletes deployments in a reverse dependency order -- Cross-deployment (including cross-project) referencing of deployment outputs, - which removes the need for hard-coding many parameters in the configs - -For example, if config file `A` contained all network resources, config file -`B` contained all instances, and config `C` contained firewall rules, router, -and VPN, in `gcloud` you would need to *manually* define the config deployment -order according to the resource dependencies. The VPN would depend on the cloud -router, both of them would depend on the network, etc. The `CFT` computes the -dependencies *automatically*, which eliminates the need for manual deployment -ordering. - -`Note:` This User Guide assumes that you are familiar with the Google Cloud SDK -operations related to resource deployment and management. For additional -information, refer to the -[SDK documentation](https://cloud.google.com/sdk/docs/). - -The CFT includes: - -- A command-line interface (henceforth, CLI) that deploys resources defined in - single or multiple CFT-compliant config files -- A comprehensive set of production-ready resource [templates](#templates) that follow - Google's best practices, which can be used with the CFT or the `gcloud` - utility. (`gcloud` is part of the Google Cloud SDK). - -You can use the CFT "as is" or modify it to suit your specific needs. Instructions -and recommendations for the CFT code modifications are in the -[CFT Developer Guide](tool_dev_guide.md). 
- -## CFT Configs - -To use the CFT, you need to first create the config files for the desired -deployments. These configs are YAML structures very similar to, and compatible -with, the `gcloud` config files. The difference is that they contain extra YAML -directives and features to support the expanded capabilities of the CFT -(multi-config deployment and cross-deployment references). - -### Extra YAML Directives - -#### name - -This directive is used to specify the name of the deployment; for example: - -```yaml -name: my-network -``` - -If not specified, the name of the deployment is inferred from the config -file name. For example, if the path to the config file is -`path/to/configs/my-network.yaml`, and the config does not specify the `name` -directive, the deployment name is set to `my-network`. This is meant as a -workaround for maintaining compatibility between the `CFT` and `gcloud` configs. -However, **it is strongly recommended that the `name` directive is specified**. - -#### project - -This directive defines the project in which the resource is deployed; for -example: - -```yaml -project: my-project -``` - -While this directive is optional, **its use in your configs is highly -recommended**. In addition to the project directive in the config file, -the project for a deployment to be created in can be specified by other means. -The order of precedence is as follows: - -1. The `--project` command-line option. If a project is specified via this - option, all configs in the run use that project. This is a way of - quickly overriding the project specified in a config file, which should be - used with caution. -2. The `project` directive in the config file. -3. The `CLOUD_FOUNDATION_PROJECT_ID` environment variable. -4. The "default project" configured with the GCP SDK. - -`Note:` When deployments utilize cross-project resources, the `project` -directive becomes mandatory in at least one of the deployments. 
- -#### description - -This directive is the deployment description, which allows you -to add some documentation to your configs; for example: - -```yaml -description: My firewall deployment for {{environment}} environment -``` - -### Extra Features - -#### Cross-deployment References with the `$(out)` Tag - -A config/deployment can specify a dependency on another deployment's output -without the need to create the dependent deployment in advance. This is the -mechanism the CFT uses to determine the order of execution of the deployments. - -```yaml -$(out....) - -# or - -$(out...) -``` - -wherein: - -- `$(out)` is the prefix that indicates that the value references an output - from a resource defined in an external deployment (in another config file) -- `project` is the ID of the project in which the external deployment is - created -- `deployment` is the he name of the external deployment (config) that - defines the referenced resource -- `resource` is the DM name of the referenced resource -- `output` is the name of the output parameter to be referenced - -The above construct works very similarly to Deployment Manager's -`$(ref..)`. However, it allows defining not only references -to resource properties not only *within* a deployment, but also -*inter-deployment/inter-project* references, using deployment outputs. The -value of output of a dependent deployment is only looked up during the current -deployment's execution, which allows you to create config files without knowing -in advance the actual values of the outputs in the dependent deployments, or -even having to create these deployments. - -For example: - -```yaml -network: $(out.my-network-prod.my-network-prod.name) -``` - -#### Jinja Templating - -All configs submitted via the CFT CLI are rendered by the [Jinja Template -Engine](http://jinja.pocoo.org/). This supports compact code by using the DRY -pattern. 
For example, by using variable substitution and `for loops`: - -```yaml -{% set environment = 'prod' %} -{% set applications = ['app1', 'app2', 'app3'] %} - -name: my-network-{{environment}} -description: Network deployment for {{environment}} environment -project: sourced-gus-1 -imports: - - path: templates/network/network.py -resources: -{% for application in applications %} - - type: templates/network/network.py - name: {{application}}-{{environment}}-network - properties: - autoCreateSubnetworks: false -{% endfor %} -``` - -An alternative to using Jinja in your configs is to write wrapper DM Python -templates and reference these templates in your configs (see the -[Templates](#templates) section). - -### Samples - -Following are three sample config files that illustrate the above directives -and features. These will be used as examples in the action-specific sections of -this User Guide: - -- [network.yaml](#network.yaml) - two networks that have no dependencies -- [firewall.yaml](#firewall.yaml) - two firewall rules, which depend on the - corresponding networks -- [instance.yaml](#instance.yaml) - one VM instance, which depends on the - network - -#### network.yaml - -```yaml -name: my-networks -description: my networks deployment - -imports: - - path: templates/network/network.py - -resources: - - type: templates/network/network.py - name: my-network-prod - properties: - autoCreateSubnetworks: true - - - type: templates/network/network.py - name: my-network-dev - properties: - autoCreateSubnetworks: false -``` - -#### firewall.yaml - -```yaml -name: my-firewalls -description: My firewalls deployment - -imports: - - path: templates/firewall/firewall.py -resources: - - type: templates/firewall/firewall.py - name: my-firewall-prod - properties: - network: $(out.my-networks.my-network-prod.name) - rules: - - name: allow-proxy-from-inside-prod - allowed: - - IPProtocol: tcp - ports: - - "80" - - "444" - description: This rule allows connectivity to the HTTP 
proxies - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 - - name: allow-dns-from-inside-prod - allowed: - - IPProtocol: udp - ports: - - "53" - - IPProtocol: tcp - ports: - - "53" - description: this rule allows DNS queries to google's 8.8.8.8 - direction: EGRESS - destinationRanges: - - 8.8.8.8/32 - - type: templates/firewall/firewall.py - name: my-firewall-dev - properties: - network: $(out.my-networks.my-network-dev.name) - rules: - - name: allow-proxy-from-inside-dev - allowed: - - IPProtocol: tcp - ports: - - "80" - - "444" - description: This rule allows connectivity to the HTTP proxies - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 -``` - -#### instance.yaml - -```yaml -name: my-instance-prod-1 -description: My instance deployment for prod environment - -imports: - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: my-instance-prod-1 - type: instance.py - properties: - zone: us-central1-a - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - network: $(out.my-networks.my-network-prod.name) - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx -``` - -## Templates - -CFT-compliant configs can use templates written in Python or Jinja2. [Templates -included in the toolkit](../templates/README.md) are recommended (although not mandatory) -as they offer robust functionality, ease of use, and adherence to best -practices. - -You can use the templates included in our library "as is," and/or modify them -to suit your needs, as well as develop your own templates. Instructions and -recommendations for template development are in the -[Template Developer Guide](template_dev_guide.md). - -## Toolkit Installation and Configuration - -This toolkit was developed primarily on/for Linux. Therefore, the Linux platform -is expected to offer the most seamless user experience. 
- -### Installing Prerequisites - -#### Python 2.7 + pip - -Follow your OS package manager instructions. For example, for Ubuntu: - -```shell -sudo apt-get install python2.7 -sudo apt-get install python-pip -``` - -#### Google Cloud SDK - -1. Install the [Google Cloud SDK](https://cloud.google.com/sdk/docs/quickstarts). -2. Ensure that the `gcloud` command is in the user's PATH: - -```shell -which gcloud -``` - -### Getting the CFT Code - -Proceed as follows: - -```shell -git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -cd cloud-foundation-toolkit/ -``` - -### Installing the CFT - -Proceed as follows: - -```shell -cd dm -sudo make cft-prerequisites # Installs prerequisites in system python -make build # builds the package -sudo make install # installs the package in /usr/local -``` - -### Uninstalling the CFT - -If you need to uninstall the CFT, proceed as follows: - -```shell -sudo make uninstall -``` - -### Updating the CFT - -To update CFT to a newer version, proceed as follows: - -```shell -cd dm -make clean -sudo make cft-prerequisites -make build -sudo make uninstall -sudo make install -``` - -## CLI Usage - -### Syntax - -The CLI commands adhere to the following syntax: - -```shell -cft [action] [configs] [action-options] -``` - -The above syntactic structure includes the following elements: - -- `[action]` - one of the supported actions/commands: - - **create** - creates deployments defined in the specified config files, - in the dependency order - - **update** - updates deployments defined in the specified config files, - in the dependency order - - **apply** - checks if the resources defined in the specified configs - already exist; if they do, updates them; if they don't, creates them - - **delete** - deletes deployments defined in the specified config files, - in the reverse dependency order -- `[config]` - The path(s) to the config files to be affected by the specified - action (files with extensions `.yaml`, `.yml`, or 
`.jinja`). It can be: - - A space-separated list of paths to the config files and/or directories, - optionally with wildcards; for example: - - ../deployments/config_1.yaml ../tests/test*.yaml ../dev/config*.yml - - ../deployments/ ../tests/ - this will submit all files with extensions - `.yaml`, `.yml`, or `.jinja` found in the ../deployments/ and ../tests/ - directories - - A space-separated list of yaml-serialized strings, each representing a - config; useful when another tool is generating configs on the fly\ - For example: `name: my-networks\nproject: my-project\nimports:\n - path: templates/network/network.py\n name: network.py\resources:\n - type: templates/network/network.py\n name: my-network-prod` -- `[action-options]` - one or more action-specific options; see the - action-specific `--help` option for details: - -```shell -cft --help -usage: cft [-h] [--version] [--project PROJECT] [--dry-run] - [--verbosity VERBOSITY] - {apply,create,update,delete} ... - -positional arguments: - {apply,create,update,delete} - -optional arguments: - -h, --help show this help message and exit - --version, -v Print version information and exit - --project PROJECT The ID of the GCP project in which ALL config files - will be executed. This option will override the - "project" directive in the config files, so be careful - when using this - --dry-run Prints the order of execution of the configs. No - changes are made - --verbosity VERBOSITY - The log level -``` - -### Actions - -The `CFT` parses the submitted config files and computes the dependencies -between them. Based on the computed dependency graph, the script -determines the sequence of deployments to be executed. It then proceeds to -execute the action in the computed order. - -#### The "create" Action - -`Note:` Make sure that the deployments you are going to create do not exist in -your DM. An attempt to create a deployment that already exists will result in -an error. 
You can, however, do one of the following: - -- Use the **update** action to update the existing deployments - see - [The "update" Action](#the-update-action) section -- Use the **apply** action which will attempt to create the deployment if it - doesn't already exist in DM, or update the deployment if it already exists - see - [The "apply" Action](#the-apply-action) section - -To create multiple deployments, in the CLI, type: - -```shell -cft create [configs] [create-options] -``` - -If you submit the [sample configs described above](#samples) - -```shell -cft create instance.yaml firewall.yaml network.yaml -``` - -the following response appears in the CLI terminal: - -```shell ----------- Stage 1 ---------- -Waiting for insert my-network-prod (fingerprint 7OyDHEL8-ZGbay4dTcXXEg==) [operation-1538159159516-576f2964f9b61-e64bdb44-8ab51124]...done. -NAME TYPE STATE ERRORS INTENT -my-network-dev compute.v1.network COMPLETED [] -my-network-prod compute.v1.network COMPLETED [] ----------- Stage 2 ---------- -Waiting for insert my-instance-prod-1 (fingerprint tdbkal-dX_ppamFJVtBGew==) [operation-1538159204094-576f298f7d030-9707b687-a3f822d9]...done. -NAME TYPE STATE ERRORS INTENT -my-instance-prod-1 compute.v1.instance COMPLETED [] -Waiting for insert my-firewall-prod (fingerprint Yuhd7khES_en86QtLYFV8w==) [operation-1538159238360-576f29b02abc2-b29dacc3-1b74eb12]...done. -NAME TYPE STATE ERRORS INTENT -allow-dns-from-inside-prod compute.beta.firewall COMPLETED [] -allow-proxy-from-inside-dev compute.beta.firewall COMPLETED [] -allow-proxy-from-inside-prod compute.beta.firewall COMPLETED [] ----------- Stage 3 ---------- -Waiting for insert my-instance-prod-2 (fingerprint z-lJJimsanFI6cIYLU8D_w==) [operation-1538159270905-576f29cf344a8-d28b6852-52527e20]...done. -NAME TYPE STATE ERRORS INTENT -my-instance-prod-2 compute.v1.instance COMPLETED [] -``` - -In this example, the network config has no dependencies, and the firewall and -instance configs depend on the network. 
Therefore, the network config is -deployed first (Stage 1), and the firewall and instance are deployed next -(Stage 2). - -`Note:` The order in which the configs are provided in the `cft create` command -does not affect the deployment creation order. That order is defined -exclusively by the dependency between the configs, which is, in turn, defined -by analyzing and ordering the cross-dependency tokens (`$(out.a.b.c.d)`). - -The following conditions will result in the action failure, -with an error message displayed: - -- One or more of the specified deployments already exist -- One or more resources in the submitted config files depend on resources that - neither exist nor are being created by the current `create` action -- One or more of the submitted config files are invalid -- One or more of the submitted config files contain circular dependencies - (i.e., deployment A depends on deployment B, and B depends on A) - -#### The "update" Action - -`Note:` Make sure that the deployments you are going to update already exist in -DM. An attempt to update a deployment that does not exist will result in an -error. You can, however, do one of the following: - -- Use the **create** action to create the required deployments - see - [The "create" Action](#the-create-action) section -- Use the **apply** action which will attempt to create the deployment if it - doesn't already exist in DM, or update the deployment if it already exists - see - [The "apply" Action](#the-apply-action) section - -To update multiple configs, in the CLI, type: - -```shell -cft update [configs] [create-options] -``` - -If you submit the [sample configs described above](#samples) - -```shell -cft update instance.yaml firewall.yaml network.yaml -``` - -the following response appears in the CLI terminal: - -```shell ----------- Stage 1 ---------- -Waiting for update my-network-prod (fingerprint 7OyDHEL8-ZGbay4dTcXXEg==) [operation-1538159159516-576f2964f9b61-e64bdb44-8ab51124]...done. 
-NAME TYPE STATE ERRORS INTENT -my-network-dev compute.v1.network COMPLETED [] -my-network-prod compute.v1.network COMPLETED [] ----------- Stage 2 ---------- -Waiting for update my-instance-prod-1 (fingerprint tdbkal-dX_ppamFJVtBGew==) [operation-1538159204094-576f298f7d030-9707b687-a3f822d9]...done. -NAME TYPE STATE ERRORS INTENT -my-instance-prod-1 compute.v1.instance COMPLETED [] -Waiting for update my-firewall-prod (fingerprint Yuhd7khES_en86QtLYFV8w==) [operation-1538159238360-576f29b02abc2-b29dacc3-1b74eb12]...done. -NAME TYPE STATE ERRORS INTENT -allow-dns-from-inside-prod compute.beta.firewall COMPLETED [] -allow-proxy-from-inside-dev compute.beta.firewall COMPLETED [] -allow-proxy-from-inside-prod compute.beta.firewall COMPLETED [] ----------- Stage 3 ---------- -Waiting for update my-instance-prod-2 (fingerprint z-lJJimsanFI6cIYLU8D_w==) [operation-1538159270905-576f29cf344a8-d28b6852-52527e20]...done. -NAME TYPE STATE ERRORS INTENT -my-instance-prod-2 compute.v1.instance COMPLETED [] -``` - -In this example, the network config has no dependencies, and the firewall and -instance configs depend on the network. Therefore, the network config is -updated first (Stage 1), and the firewall and instance are updated next -(Stage 2). 
- -The following conditions will result in the action failure, with an error -message displayed: - -- One or more of the specified deployments do not exist -- One or more resources in the submitted config files depend on resources that - do not exist -- One or more of the submitted config files are invalid -- One or more of the submitted config files contain circular dependencies - (i.e., deployment A depends on deployment B, and B depends on A) - -You can use the `--preview` option with the `update` action; for example: - -```shell -cft update test/fixtures/configs/ --preview -``` - -The CFT puts each deployment in the `preview` mode within DM, displays a -preview of the action results, and enables you to approve/decline the action -for each of the submitted configs. The following prompt is displayed after -the Stage 1 log: - -```shell -Update(u), Skip (s), or Abort(a) Deployment? -``` - -Having reviewed the displayed information, enter one of the following -responses: - -- **u (update)** - confirms the deployment change as shown in the preview -- **s (skip)** - cancels the update (no change) and continues to the next - config in the sequence -- **a (abort)** - cancels the update (no change) and aborts the script - execution - -#### The "apply" Action - -The **apply** action makes the CFT decide which deployments must be created -(because they do not exist), and which ones must be updated (because they do -exist). - -To create or update multiple configs, in the CLI, type: - -```shell -cft apply [configs] [create-options] -``` - -If you submit the [sample configs described above](#samples) - -```shell -cft apply instance.yaml firewall.yaml network.yaml -``` - -the following response appears in the CLI terminal: - -```shell ----------- Stage 1 ---------- -Waiting for update my-network-prod (fingerprint 7OyDHEL8-ZGbay4dTcXXEg==) [operation-1538159159516-576f2964f9b61-e64bdb44-8ab51124]...done. 
-NAME TYPE STATE ERRORS INTENT -my-network-dev compute.v1.network COMPLETED [] -my-network-prod compute.v1.network COMPLETED [] ----------- Stage 2 ---------- -Waiting for update my-instance-prod-1 (fingerprint tdbkal-dX_ppamFJVtBGew==) [operation-1538159204094-576f298f7d030-9707b687-a3f822d9]...done. -NAME TYPE STATE ERRORS INTENT -my-instance-prod-1 compute.v1.instance COMPLETED [] -Waiting for update my-firewall-prod (fingerprint Yuhd7khES_en86QtLYFV8w==) [operation-1538159238360-576f29b02abc2-b29dacc3-1b74eb12]...done. -NAME TYPE STATE ERRORS INTENT -allow-dns-from-inside-prod compute.beta.firewall COMPLETED [] -allow-proxy-from-inside-dev compute.beta.firewall COMPLETED [] -allow-proxy-from-inside-prod compute.beta.firewall COMPLETED [] ----------- Stage 3 ---------- -Waiting for update my-instance-prod-2 (fingerprint z-lJJimsanFI6cIYLU8D_w==) [operation-1538159270905-576f29cf344a8-d28b6852-52527e20]...done. -NAME TYPE STATE ERRORS INTENT -my-instance-prod-2 compute.v1.instance COMPLETED [] -``` - -The following conditions will result in the action failure, with an error -message displayed: - -- One or more resources in the submitted config files depend on resources that - neither exist nor being created by the current `apply` action -- One or more of the submitted config files are invalid -- One or more of the submitted config files contain circular dependencies - (i.e., deployment A depends on deployment B, and B depends on A) - -You can use the `--preview` option with the `apply` action; for example: - -```shell -cft apply test/fixtures/configs/ --preview -``` - -The CFT puts each deployment in the `preview` mode within DM, displays a -preview of the action results, and enables you to approve/decline the action -for each of the submitted configs. The following prompt is displayed after -the Stage 1 log: - -```shell -Update(u), Skip (s), or Abort(a) Deployment? 
-``` - -Having reviewed the displayed information, enter one of the following -responses: - -- **u (update)** - confirms the deployment change as shown in the preview -- **s (skip)** - cancels the update (no change) and continues to the next - config in the sequence -- **a (abort)** - cancels the update (no change) and aborts the script - execution - -`Note:` If the `apply` action is creating (rather than updating) a set of -resources, and if you choose to skip the creation of a deployment on which -subsequent deployments depends (e.g., **skip** network in Stage 1 and -**update** firewall in Stage 2), the operation will fail with an error message. - -#### The "delete" Action - -To delete the previously created/updated multiple deployments, in the CLI, type: - -```shell -cft delete [configs] [create-options] -``` - -If you submit the [sample configs described above](#samples) - -```shell -cft delete instance.yaml firewall.yaml network.yaml -``` - -the following response appears in the CLI terminal: - -```shell ----------- Stage 1 ---------- -Waiting for delete my-instance-prod-2 (fingerprint 3IWMMfbjsUWjtWgvs6Evdw==) [operation-1538159406282-576f2a504f510-2dceed8f-b222b564]...done. ----------- Stage 2 ---------- -Waiting for delete my-instance-prod-1 (fingerprint ifQgUyTSOtVE1H6VgaIlYA==) [operation-1538159505990-576f2aaf66170-fcc5246d-2d44d005]...done. -Waiting for delete my-firewall-prod (fingerprint xFs1fcZiLJPVV1hUw61-og==) [operation-1538159629835-576f2b2581af9-a83468de-d3685d90]...done. ----------- Stage 3 ---------- -Waiting for delete my-network-prod (fingerprint EhMN6C5IeADJYRo40CmuAg==) [operation-1538159649120-576f2b37e5f02-35da3a44-cf279bfa]...done. -``` - -The order of execution for `delete` is reversed (compared to `create` or -`update`). This prevents DM from attempting to delete, for example, a network -resource while an instance resource (dependent on the network) still exists. 
- -`Note:` The CFT silently ignores deletion of deployments that do not exist. -This covers those cases where the deletion of a specific deployment had -failed and the problem was then fixed. You do not have to figure out which -deployments to delete; you simply re-run the command. diff --git a/dm/example-solutions/wrapper-template/README.md b/dm/example-solutions/wrapper-template/README.md deleted file mode 100644 index a31dfe64057..00000000000 --- a/dm/example-solutions/wrapper-template/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# Wrapper Templates - -Using wrapper templates is a clean way to extend or restrict already existing templates without modifying them. -This is a common practice when using templates from external sources for example from the Cloud Foundation Toolkit. - -## Flexible solutions - -Python provides easy access to the properties which will be passed forward to the GCP APIs. A wrapper template -is a good place to manipulate properties, for example to enforce naming conventions. - -## Naming convention - Folders wrapper - -In the *folders-wrapper.py* at line 11-12 the template is modifying the *Display Name* of the folders by adding a prefix. -This simple example can be easily extended, the prefix can be loaded from an external configuration file, the naming convention -should be calculated by a helper function, implemented in a shared helper class. - -### Schema file of the wrapper - -If the wrapper class is for a specific template ( in this case for the CFT Folders template), a Schema file can be -used for the following: - - - Importing the target template makes the YAML easier and explicitly states the template dependency - - Copying the required and optional property definition from the target template enforces the property validation in an earlier - stage. ( Unfortunately referencing to another Schema file is not possible today.) - - Comments in the Schema file explains the usage and the purpose of it. 
- - ## Generic wrapper - - Using a generic wrapper fits into the concept of hierarchical configuration management when the configuration properties - of the deployment are coming from multiple external files, not only the starting YAML. (See ../../hierarchical_configuration) - The generic wrapper is able to inject the context aware properties and pass them to the target template which is defined in - the starting YAML. - - A nice trick is to import the target template in the YAML file and name it as "target-template.py", this makes you able to - use the same wrapper template with any YAML/Target template combination. \ No newline at end of file diff --git a/dm/example-solutions/wrapper-template/folders-wrapper.py b/dm/example-solutions/wrapper-template/folders-wrapper.py deleted file mode 100644 index fe201530ed7..00000000000 --- a/dm/example-solutions/wrapper-template/folders-wrapper.py +++ /dev/null @@ -1,21 +0,0 @@ - -def generate_config(context): - - # Using some global values from an external config file. - # Hardcoded for this example. - - global_prefix = "acc " - - # Manipulate context.properties # - - for folder in context.properties["folders"]: - folder["displayName"] = global_prefix + folder["displayName"] - - # Passing values forward to CFT template - - return { - 'resources': [{ - 'type': "cft-folder.py", - 'name': context.env['name'], - 'properties': context.properties}] - } diff --git a/dm/example-solutions/wrapper-template/folders-wrapper.py.schema b/dm/example-solutions/wrapper-template/folders-wrapper.py.schema deleted file mode 100644 index ba2288c505d..00000000000 --- a/dm/example-solutions/wrapper-template/folders-wrapper.py.schema +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Folder - description: | - Creates a folder under an organization or under a parent - folder. - -imports: -- path: ../../templates/folder/folder.py - name: cft-folder.py - -required: - - folders - -properties: - folders: - type: array - description: List of folders to create. - items: - orgId: - type: string - pattern: ^organization\/[0-9]{8,25}$ - description: | - The organization ID. If this field is set, the folder is - created under the organization. The value must conform to the - format `organizations/`. For example, - `organizations/111122223333`. - folderId: - type: string - pattern: ^folder\/[0-9]{8,25}$ - description: | - The folder ID. If this field is set, the folder is created - under the folder specified by the ID. The value must conform - to the format `folders/`. For example, - `folders/1234567890`. - displayName: - type: string - description: The display name of the folder. - pattern: | - [\p{L}\p{N}]({\p{L}\p{N}_- ]{0,28}[\p{L}\p{N}])? - -outputs: - properties: - folders: - type: array - description: Array of folder resource information. - items: - description: | - The name of the folder resource. For example, the output can be - referenced as: $(ref..rules..parent) - patternProperties: - ".*": - type: object - description: Details for a folder resource. - properties: - name: - type: string - description: | - Name of the folder resource in the format - `folders/`. - parent: - type: string - description: | - The resource name of the parent Folder or Organization. 
- displayName: - type: string - description: The folder's display name. - createTime: - type: string - description: Creation timestamp in RFC3339 text format. - lifecycleState: - type: string - description: The Folder's current lifecycle state. diff --git a/dm/example-solutions/wrapper-template/generic-wrapper.py b/dm/example-solutions/wrapper-template/generic-wrapper.py deleted file mode 100644 index 310e2ae65c6..00000000000 --- a/dm/example-solutions/wrapper-template/generic-wrapper.py +++ /dev/null @@ -1,23 +0,0 @@ -from helper import config_merger - - -def generate_config(context): - - # Using helper functions to load external configurations. - # The deployment YAML only contains the minimal context of the deployment. - # (Module name, environment) - # This way the wrapper template injects information to the target template - # without overloading the starting YAML. - - local_properties = config_merger.ConfigContext( - context.properties['environment'], - context.properties['module']) - - # Passing values forward to template - - return { - 'resources': [{ - 'type': "target-template.py", - 'name': context.env['name'], - 'properties': local_properties}] - } diff --git a/dm/example-solutions/wrapper-template/generic-wrapper.yaml b/dm/example-solutions/wrapper-template/generic-wrapper.yaml deleted file mode 100644 index 3ebf0b7b29d..00000000000 --- a/dm/example-solutions/wrapper-template/generic-wrapper.yaml +++ /dev/null @@ -1,12 +0,0 @@ -imports: -- path: generic-wrapper.py - name: generic-wrapper.py -- path: ../templates/project/project.py - name: target-template.py - -resources: -- name: cft_project - type: generic-wrapper.py - properties: - environment: dev - module: project diff --git a/dm/helpers/google_netblock_ip_ranges/README.md b/dm/helpers/google_netblock_ip_ranges/README.md deleted file mode 100644 index 16d8dcff626..00000000000 --- a/dm/helpers/google_netblock_ip_ranges/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# Important Google IP ranges helper - 
-This helper creates firewall template rules for a network with Google important ranges. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [network](../network/README.md) -- Grant the [compute.networkAdmin or compute.securityAdmin](https://cloud.google.com/compute/docs/access/iam) IAM role to the project service account - -## Deployment - -### Resources - -- [compute.beta.firewall](https://cloud.google.com/compute/docs/reference/rest/beta/firewalls) - - `Note:` The beta API supports the firewall log feature. - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - cd cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, with [firewall template](../../templates/firewall/firewall.py): - -```shell - cp helpers/google_netblock_ip_ranges/examples/google_netblock_ip_ranges_example.yaml google_netblock_ip_ranges_example.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (like properties). - Name of the imported YAML-file with important IP ranges must be exact "google_netblock_ip_ranges.yaml": - -```shell - vim google_netblock_ip_ranges_example.yaml # <== change values to match your GCP setup -``` - -5. 
Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config google_netblock_ip_ranges_example.yaml -``` - -## Examples - -- [Firewall](examples/google_netblock_ep_ranges_example.yaml) diff --git a/dm/helpers/google_netblock_ip_ranges/examples/google_netblock_ip_ranges_example.yaml b/dm/helpers/google_netblock_ip_ranges/examples/google_netblock_ip_ranges_example.yaml deleted file mode 100644 index ba4de2bd13f..00000000000 --- a/dm/helpers/google_netblock_ip_ranges/examples/google_netblock_ip_ranges_example.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Example of the google_netblock_ip_ranges helper usage. - -imports: - - path: helpers/google_netblock_ip_ranges/google_netblock_ip_ranges.py - name: google_netblock_ip_ranges.py - - path: helpers/google_netblock_ip_ranges/google_netblock_ip_ranges.yaml - name: google_netblock_ip_ranges.yaml # name must be exact "google_netblock_ip_ranges.yaml" - - path: templates/firewall/firewall.py - name: firewall.py - -resources: - - name: examle-firewall-rule - type: google_netblock_ip_ranges.py - properties: - template: firewall.py # name of the original DM template - network: - rules: - - name: allow-proxy-from-google-ranges - allowed: - - IPProtocol: tcp - ports: - - "80" - - "443" - description: example ingress rule for default net - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 - - google_netblock_ip_ranges['google-netblocks']['cidrIPv4'] - - name: allow-dns-to-gcp-ranges - allowed: - - IPProtocol: udp - ports: - - "53" - - IPProtocol: tcp - ports: - - "53" - description: example egress rule for default net - direction: EGRESS - priority: 20 - destinationRanges: - - google_netblock_ip_ranges['cloud-netblocks']['cidrIPv4'] - - 8.8.8.8/32 diff --git a/dm/helpers/google_netblock_ip_ranges/google_netblock_ip_ranges.py b/dm/helpers/google_netblock_ip_ranges/google_netblock_ip_ranges.py deleted file mode 100644 index cd781811e55..00000000000 --- 
a/dm/helpers/google_netblock_ip_ranges/google_netblock_ip_ranges.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2019 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template substitutes google netblock IP ranges into firewall rules.""" - -import yaml - -def generate_config(context): - google_netblock_ip_ranges = yaml.load(context.imports['google_netblock_ip_ranges.yaml']) - properties = context.properties - resource_type = properties['template'] - properties.pop('template', None) - name = properties.get('name', context.env['name']) - properties.pop('name', None) - rules = [] - for rule in properties['rules']: - rule_sub = rule - if 'sourceRanges' in rule: - rule_source = [] - for index, src_range in enumerate(rule['sourceRanges']): - if 'google_netblock_ip_ranges' in src_range: - rule_source.extend(eval(src_range)) - else: - rule_source.append(src_range) - rule_sub['sourceRanges'] = rule_source - if 'destinationRanges' in rule: - rule_destination = [] - for index, dst_range in enumerate(rule['destinationRanges']): - if 'google_netblock_ip_ranges' in dst_range: - rule_destination.extend(eval(dst_range)) - else: - rule_destination.append(dst_range) - rule_sub['destinationRanges'] = rule_destination - rules.append(rule_sub) - properties.update({'rules': rules}) - resources = { - 'name': name, - 'type': resource_type, - 'properties': properties - } - - return {'resources': [resources]} diff --git 
a/dm/helpers/google_netblock_ip_ranges/google_netblock_ip_ranges.yaml b/dm/helpers/google_netblock_ip_ranges/google_netblock_ip_ranges.yaml deleted file mode 100644 index 0c3354671bd..00000000000 --- a/dm/helpers/google_netblock_ip_ranges/google_netblock_ip_ranges.yaml +++ /dev/null @@ -1,230 +0,0 @@ -# Google's important netbock IP ranges -restricted-googleapis: - cidr: - - "199.36.153.4/30" - cidrIPv4: - - "199.36.153.4/30" - -dns-forwarders: - cidr: - - "35.199.192.0/19" - cidrIPv4: - - "35.199.192.0/19" - -iap-forwarders: - cidr: - - "35.235.240.0/20" - cidrIPv4: - - "35.235.240.0/20" - -health-checkers: - cidr: - - "35.191.0.0/16" - - "130.211.0.0/22" - cidrIPv4: - - "35.191.0.0/16" - - "130.211.0.0/22" - -legacy-health-checkers: - cidr: - - "35.191.0.0/16" - - "209.85.152.0/22" - - "209.85.204.0/22" - cidrIPv4: - - "35.191.0.0/16" - - "209.85.152.0/22" - - "209.85.204.0/22" - -cloud-netblocks: - cidr: - - "8.34.208.0/20" - - "8.35.192.0/21" - - "8.35.200.0/23" - - "108.59.80.0/20" - - "108.170.192.0/20" - - "108.170.208.0/21" - - "162.216.148.0/22" - - "162.222.176.0/21" - - "173.255.112.0/20" - - "192.158.28.0/22" - - "199.192.112.0/22" - - "199.223.232.0/22" - - "199.223.236.0/23" - - "23.236.48.0/20" - - "23.251.128.0/19" - - "35.200.0.0/13" - - "35.208.0.0/13" - - "107.167.160.0/19" - - "107.178.192.0/18" - - "146.148.2.0/23" - - "146.148.4.0/22" - - "146.148.8.0/21" - - "146.148.16.0/20" - - "146.148.32.0/19" - - "146.148.64.0/18" - - "34.104.0.0/14" - - "130.211.8.0/21" - - "130.211.16.0/20" - - "130.211.32.0/19" - - "130.211.64.0/18" - - "130.211.128.0/17" - - "104.154.0.0/15" - - "104.196.0.0/14" - - "208.68.108.0/23" - - "35.184.0.0/14" - - "35.188.0.0/15" - - "35.216.0.0/15" - - "35.190.0.0/17" - - "35.190.128.0/18" - - "35.190.192.0/19" - - "35.235.224.0/20" - - "35.192.0.0/14" - - "35.196.0.0/15" - - "35.198.0.0/16" - - "35.199.0.0/17" - - "35.199.128.0/18" - - "35.235.216.0/21" - - "35.190.224.0/20" - - "35.232.0.0/15" - - "35.234.0.0/16" - - 
"35.235.0.0/17" - - "35.235.192.0/20" - - "35.236.0.0/14" - - "35.240.0.0/13" - - "130.211.4.0/22" - - "35.220.0.0/14" - - "34.64.0.0/11" - - "34.96.0.0/14" - - "34.100.0.0/16" - - "34.102.0.0/15" - - "108.170.216.0/22" - - "108.170.220.0/23" - - "108.170.222.0/24" - - "35.224.0.0/13" - - "35.190.240.0/22" - - "34.124.0.0/18" - - "2600:1900::/35" - cidrIPv4: - - "8.34.208.0/20" - - "8.35.192.0/21" - - "8.35.200.0/23" - - "108.59.80.0/20" - - "108.170.192.0/20" - - "108.170.208.0/21" - - "162.216.148.0/22" - - "162.222.176.0/21" - - "173.255.112.0/20" - - "192.158.28.0/22" - - "199.192.112.0/22" - - "199.223.232.0/22" - - "199.223.236.0/23" - - "23.236.48.0/20" - - "23.251.128.0/19" - - "35.200.0.0/13" - - "35.208.0.0/13" - - "107.167.160.0/19" - - "107.178.192.0/18" - - "146.148.2.0/23" - - "146.148.4.0/22" - - "146.148.8.0/21" - - "146.148.16.0/20" - - "146.148.32.0/19" - - "146.148.64.0/18" - - "34.104.0.0/14" - - "130.211.8.0/21" - - "130.211.16.0/20" - - "130.211.32.0/19" - - "130.211.64.0/18" - - "130.211.128.0/17" - - "104.154.0.0/15" - - "104.196.0.0/14" - - "208.68.108.0/23" - - "35.184.0.0/14" - - "35.188.0.0/15" - - "35.216.0.0/15" - - "35.190.0.0/17" - - "35.190.128.0/18" - - "35.190.192.0/19" - - "35.235.224.0/20" - - "35.192.0.0/14" - - "35.196.0.0/15" - - "35.198.0.0/16" - - "35.199.0.0/17" - - "35.199.128.0/18" - - "35.235.216.0/21" - - "35.190.224.0/20" - - "35.232.0.0/15" - - "35.234.0.0/16" - - "35.235.0.0/17" - - "35.235.192.0/20" - - "35.236.0.0/14" - - "35.240.0.0/13" - - "130.211.4.0/22" - - "35.220.0.0/14" - - "34.64.0.0/11" - - "34.96.0.0/14" - - "34.100.0.0/16" - - "34.102.0.0/15" - - "108.170.216.0/22" - - "108.170.220.0/23" - - "108.170.222.0/24" - - "35.224.0.0/13" - - "35.190.240.0/22" - - "34.124.0.0/18" - cidrIPv6: - - "2600:1900::/35" - -google-netblocks: - cidr: - - "35.190.247.0/24" - - "64.233.160.0/19" - - "66.102.0.0/20" - - "66.249.80.0/20" - - "72.14.192.0/18" - - "74.125.0.0/16" - - "108.177.8.0/21" - - "173.194.0.0/16" - - 
"209.85.128.0/17" - - "216.58.192.0/19" - - "216.239.32.0/19" - - "172.217.0.0/19" - - "172.217.32.0/20" - - "172.217.128.0/19" - - "172.217.160.0/20" - - "172.217.192.0/19" - - "108.177.96.0/19" - - "35.191.0.0/16" - - "130.211.0.0/22" - - "2001:4860:4000::/36" - - "2404:6800:4000::/36" - - "2607:f8b0:4000::/36" - - "2800:3f0:4000::/36" - - "2a00:1450:4000::/36" - - "2c0f:fb50:4000::/36" - cidrIPv4: - - "35.190.247.0/24" - - "64.233.160.0/19" - - "66.102.0.0/20" - - "66.249.80.0/20" - - "72.14.192.0/18" - - "74.125.0.0/16" - - "108.177.8.0/21" - - "173.194.0.0/16" - - "209.85.128.0/17" - - "216.58.192.0/19" - - "216.239.32.0/19" - - "172.217.0.0/19" - - "172.217.32.0/20" - - "172.217.128.0/19" - - "172.217.160.0/20" - - "172.217.192.0/19" - - "108.177.96.0/19" - - "35.191.0.0/16" - - "130.211.0.0/22" - cidrIPv6: - - "2001:4860:4000::/36" - - "2404:6800:4000::/36" - - "2607:f8b0:4000::/36" - - "2800:3f0:4000::/36" - - "2a00:1450:4000::/36" - - "2c0f:fb50:4000::/36" diff --git a/dm/pipeline/README.md b/dm/pipeline/README.md deleted file mode 100644 index 0b25bcc111f..00000000000 --- a/dm/pipeline/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# CFT Sample Pipeline - - - -- [Overview](#overview) -- [Prerequisites](#prerequisites) -- [Pipelines](#pipelines) - - - -## Overview - -You can use the Cloud Foundation toolkit (henceforth, CFT) as a standalone -solution, via its command line interface (CLI) – see -[CFT User Guide](../docs/userguide.md) for details. Alternatively, you can -initiate CFT actions via its API, from a variety of existing orchestration -tools, or from your own application. - -This document describes one of the CFT integration scenarios, wherein -you initiate the CFT actions from Jenkins. It uses as an example a -Jenkins-based "sample pipeline", which is included in this CFT directory. 
- -`Note:` This document assumes that you are familiar with the basics of -[Jenkins](https://jenkins.io/) and of its -[Pipeline Plugin](https://jenkins.io/doc/book/pipeline/). - -`Note:` The Jenkins-based process this document describes is for demonstration -purposes only. It is not intended as a product. Your Jenkins setup is likely -to be different from the one used for demonstrate. Therefore, to achieve -similar results, you need to modify certain parameters in all the demo files. - -## Prerequisites - -1. A working Jenkins server: - - Different organizations have vastly different Jenkins setups. Therefore, - this document provides no specific recommendations for fulfilling this - prerequisite. You might use a Compute Image from - [Marketplace](https://console.cloud.google.com/marketplace/browse?q=jenkins). - - Install the Pipeline Utility Steps plugin. -2. GCP Service Accounts (SA): - - `Service Account for Jenkins`: Jenkins must be configured with - permissions sufficient for managing DM deployments. This can be achieved - by: - - Associating a SA with the GCP Compute Instance running Jenkins (if - Jenkins is in GCP), or - - Configuring the SA credentials with the Jenkins user (if running - Jenkins outside GCP) - - `Service Account for the GCP project` (a.k.a. the DM Service Account): - this SA needs permissions to all APIs DM uses to create resources. -3. The Cloud Foundation toolkit: - - CFT must be installed in the Jenkins master and slaves. For installation - instructions, see the [CFT User - Guide](../docs/userguide.md#toolkit-installation-and-configuration). - - Note that the [Google Cloud SDK](https://cloud.google.com/sdk) is a - prerequisite for the CFT. -4. The Environment Variables file: - - An example file is [here](pipeline-vars). - - Replace with values specific to you organization, and move - the file to the Jenkins user's home directory. 
- -## Pipelines - -This directory implements deployment pipelines to show how the CFT can be used -in a *fictitious company*. In this fictitious company, three separate teams are -responsible for the corresponding separate pieces of the cloud infrastructure: - -- Central Cloud Platform Team: - - Responsible for creating GCP projects, IAM entities, Permissions, - Billing, etc. - - Owns the pipeline and configs in [project](project) -- Central Networking Team: - - Responsible for networking between for all other teams, interconnects, - on-premise integration, etc. - - Owns the pipeline and configs in [network](network) -- Application Teams (typically, more than one): - - Responsible for deploying the team-specific application stack (in this - example, there is a single application team, which is responsible for - deploying its GKE clusters in the different environments) - - Owns the pipeline and configs in [app](app) - -Each folder in this directory of the CFT repository represents and implements -a pipeline that corresponds to one of the above teams. - -`Note:` This is not a typical way of organizing Jenkins pipelines. Normally, -each pipeline would be in its own Git repository, with its own access controls -for the different teams. 
\ No newline at end of file diff --git a/dm/pipeline/app/Jenkinsfile b/dm/pipeline/app/Jenkinsfile deleted file mode 100644 index 221a250d72a..00000000000 --- a/dm/pipeline/app/Jenkinsfile +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env groovy - - -// requires "Pipeline Utility Steps" plugin -// requires "pipeline-vars" file to be setup inside jenkins user home dir - -def config_dir = "pipeline/app" // relative to ${cft_dir} - -def env = "~/pipeline-vars" -def repo = "https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit" -def branch = "pipeline" -def git_dir = "cft" -def cft_dir = "dm" - -pipeline { - agent any - stages { - stage('Checkout Repos') { - steps { - sh "rm -rf ${git_dir}" - sh "git clone ${repo} ${git_dir}" - sh "cd ${git_dir} && git checkout ${branch}" - } - } - stage('Initialize Deploy Stages') { - steps { - sh ". ${env} && cd ${git_dir}/${cft_dir} && cft apply ${config_dir} --show-stages --format yaml> .stages.yaml" - script { - def graph = readYaml file: "${git_dir}/${cft_dir}/.stages.yaml" - def i = 1 - graph.each { stg -> - stage("stage-${i}") { - def config_list = [] - stg.each { conf -> - config_list.add(conf.source) - } - def configs = config_list.join(" ") - echo "Executing configs: ${configs}" - sh ". 
${env} && cd ${git_dir}/${cft_dir} && cft apply ${configs}" - } - i++ - } - } - } - } - } -} diff --git a/dm/pipeline/app/app-team-dev.yaml b/dm/pipeline/app/app-team-dev.yaml deleted file mode 100644 index 6dcee10455a..00000000000 --- a/dm/pipeline/app/app-team-dev.yaml +++ /dev/null @@ -1,24 +0,0 @@ -name: app-team-1-dev -project: {{env.CFT_CHILD_PROJECT}} -description: App Team 1 DEV GKE Cluster - -imports: - - path: templates/gke/gke.py - name: gke.py - -resources: - - name: app-team-1-dev - type: gke.py - properties: - zone: us-east1-b - cluster: - name: app-team-1-dev - description: App Team 1 DEV GKE Cluster - network: $(out.network-app-team-1.network-app-team-1-dev.name) - subnetwork: subnet-app-team-1-dev - nodeConfig: - oauthScopes: - - https://www.googleapis.com/auth/compute - - https://www.googleapis.com/auth/devstorage.read_only - - https://www.googleapis.com/auth/logging.write - - https://www.googleapis.com/auth/monitoring diff --git a/dm/pipeline/app/app-team-prod.yaml b/dm/pipeline/app/app-team-prod.yaml deleted file mode 100644 index 8c85a7d4e7f..00000000000 --- a/dm/pipeline/app/app-team-prod.yaml +++ /dev/null @@ -1,28 +0,0 @@ -name: app-team-1-prod -project: {{env.CFT_CHILD_PROJECT}} -description: App Team 1 PROD GKE Cluster - -imports: - - path: templates/gke/gke.py - name: gke.py - -resources: - - name: app-team-1-prod - type: gke.py - properties: - clusterLocationType: Regional - region: us-east1 - cluster: - name: app-team-1-prod-cluster - description: App Team 1 PROD GKE Cluster - network: $(out.network-app-team-1.network-app-team-1-prod.name) - subnetwork: subnet-app-team-1-prod - nodeConfig: - oauthScopes: - - https://www.googleapis.com/auth/compute - - https://www.googleapis.com/auth/devstorage.read_only - - https://www.googleapis.com/auth/logging.write - - https://www.googleapis.com/auth/monitoring - locations: - - us-east1-c - - us-east1-b diff --git a/dm/pipeline/network/Jenkinsfile b/dm/pipeline/network/Jenkinsfile deleted file mode 
100644 index 9b2ae89576c..00000000000 --- a/dm/pipeline/network/Jenkinsfile +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env groovy - - -// requires "Pipeline Utility Steps" plugin -// requires "pipeline-vars" file to be setup inside jenkins user home dir - -def config_dir = "pipeline/network" // relative to ${cft_dir} - -def env = "~/pipeline-vars" -def repo = "https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit" -def branch = "pipeline" -def git_dir = "cft" -def cft_dir = "dm" - -pipeline { - agent any - stages { - stage('Checkout Repos') { - steps { - sh "rm -rf ${git_dir}" - sh "git clone ${repo} ${git_dir}" - sh "cd ${git_dir} && git checkout ${branch}" - } - } - stage('Initialize Deploy Stages') { - steps { - sh ". ${env} && cd ${git_dir}/${cft_dir} && cft apply ${config_dir} --show-stages --format yaml> .stages.yaml" - script { - def graph = readYaml file: "${git_dir}/${cft_dir}/.stages.yaml" - def i = 1 - graph.each { stg -> - stage("stage-${i}") { - def config_list = [] - stg.each { conf -> - config_list.add(conf.source) - } - def configs = config_list.join(" ") - echo "Executing configs: ${configs}" - sh ". 
${env} && cd ${git_dir}/${cft_dir} && cft apply ${configs}" - } - i++ - } - } - } - } - } -} diff --git a/dm/pipeline/network/firewall-app-team-1.yaml b/dm/pipeline/network/firewall-app-team-1.yaml deleted file mode 100644 index 2213da659a8..00000000000 --- a/dm/pipeline/network/firewall-app-team-1.yaml +++ /dev/null @@ -1,49 +0,0 @@ -name: firewall-app-team-1 -project: {{env.CFT_CHILD_PROJECT}} -description: Firewall deployment for app-team-1 - -imports: - - path: templates/firewall/firewall.py -resources: - - type: templates/firewall/firewall.py - name: dev-rules - properties: - network: $(out.network-app-team-1.network-app-team-1-dev.name) - rules: - - name: allow-proxy-from-inside-dev - allowed: - - IPProtocol: tcp - ports: - - "80" - - "443" - description: This rule allows connectivity to the HTTP proxies - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 - - name: allow-dns-from-all - allowed: - - IPProtocol: udp - ports: - - "53" - - IPProtocol: tcp - ports: - - "53" - description: this rule allows DNS queries to google's 8.8.8.8 - direction: EGRESS - destinationRanges: - - 8.8.8.8/32 - - type: templates/firewall/firewall.py - name: prod-rules - properties: - network: $(out.network-app-team-1.network-app-team-1-prod.name) - rules: - - name: allow-proxy-from-inside-prod - allowed: - - IPProtocol: tcp - ports: - - "80" - - "443" - description: This rule allows connectivity to the HTTP proxies - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 diff --git a/dm/pipeline/network/firewall-app-team-2.yaml b/dm/pipeline/network/firewall-app-team-2.yaml deleted file mode 100644 index 681535f38dc..00000000000 --- a/dm/pipeline/network/firewall-app-team-2.yaml +++ /dev/null @@ -1,47 +0,0 @@ -name: firewall-app-team-2 -project: {{env.CFT_CHILD_PROJECT}} -description: Firewall deployment for app-team-2 - -imports: - - path: templates/firewall/firewall.py -resources: - - type: templates/firewall/firewall.py - name: dev-rules - properties: - network: 
$(out.network-app-team-2.network-app-team-2-dev.name) - rules: - - name: allow-proxy-from-inside-dev - allowed: - - IPProtocol: tcp - ports: - - "443" - description: This rule allows connectivity to the HTTP proxies - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 - - name: allow-dns-from-all - allowed: - - IPProtocol: udp - ports: - - "53" - - IPProtocol: tcp - ports: - - "53" - description: this rule allows DNS queries to google's 8.8.8.8 - direction: EGRESS - destinationRanges: - - 8.8.8.8/32 - - type: templates/firewall/firewall.py - name: prod-rules - properties: - network: $(out.network-app-team-2.network-app-team-2-prod.name) - rules: - - name: allow-proxy-from-inside-prod - allowed: - - IPProtocol: tcp - ports: - - "443" - description: This rule allows connectivity to the HTTP proxies - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 diff --git a/dm/pipeline/network/network-app-team-1.yaml b/dm/pipeline/network/network-app-team-1.yaml deleted file mode 100644 index 4da3759e35a..00000000000 --- a/dm/pipeline/network/network-app-team-1.yaml +++ /dev/null @@ -1,24 +0,0 @@ -name: network-app-team-1 -project: {{env.CFT_CHILD_PROJECT}} -description: Network deployment for app-team-1 - -imports: - - path: templates/network/network.py - -resources: - - type: templates/network/network.py - name: network-app-team-1-dev - properties: - autoCreateSubnetworks: false - subnetworks: - - name: subnet-app-team-1-dev - region: us-east1 - ipCidrRange: 192.168.0.0/24 - - type: templates/network/network.py - name: network-app-team-1-prod - properties: - autoCreateSubnetworks: false - subnetworks: - - name: subnet-app-team-1-prod - region: us-east1 - ipCidrRange: 192.168.1.0/24 diff --git a/dm/pipeline/network/network-app-team-2.yaml b/dm/pipeline/network/network-app-team-2.yaml deleted file mode 100644 index d3443dd5de3..00000000000 --- a/dm/pipeline/network/network-app-team-2.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: network-app-team-2 -project: {{env.CFT_CHILD_PROJECT}} 
-description: Network deployment for app-team-2 - -imports: - - path: templates/network/network.py - -resources: - - type: templates/network/network.py - name: network-app-team-2-dev - properties: - autoCreateSubnetworks: false - - type: templates/network/network.py - name: network-app-team-2-prod - properties: - autoCreateSubnetworks: true diff --git a/dm/pipeline/network/proxy-app-team-2.yaml b/dm/pipeline/network/proxy-app-team-2.yaml deleted file mode 100644 index 022c7bb32ae..00000000000 --- a/dm/pipeline/network/proxy-app-team-2.yaml +++ /dev/null @@ -1,22 +0,0 @@ -name: proxy-app-team-2 -project: {{env.CFT_CHILD_PROJECT}} -description: Proxy deployment for app-team-2 - -imports: - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: proxy-app-team-2-prod - type: instance.py - properties: - zone: us-central1-a - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - network: $(out.network-app-team-2.network-app-team-2-prod.name) - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx diff --git a/dm/pipeline/pipeline-vars b/dm/pipeline/pipeline-vars deleted file mode 100644 index a48645bf2e1..00000000000 --- a/dm/pipeline/pipeline-vars +++ /dev/null @@ -1,15 +0,0 @@ -# This file is used by the sample pipeline configs to make them usable by -# different organizations, and to hide sensitive IDs from git. -# -# It's strongly recommended not to keep your organization's specific info in -# a public git repo. 
-# -# To use the supplied configs without modification, replace the -# fields with values pertinent to your organization, and install this file -# inside the Jenkins user's home directory (~jenkins/, or ~tomcat/, etc) - -export CLOUD_FOUNDATION_PROJECT_ID= # This is the project used to create new projects (only used in the 'project' Jenkins job) -export CFT_ORGANIZATION_ID= # Organization ID in which new projects get created (only used in the 'project' Jenkins job) -export CFT_ORGANIZATION_FOLDER_ID= # Folder ID in which new projects get created (only used in the 'project' Jenkins job) -export CFT_BILLING_ACCOUNT_ID= # Billing accounts to associate with new projects (only used in the 'project' Jenkins job) -export CFT_CHILD_PROJECT= # This is the project where the resources get created diff --git a/dm/pipeline/project/Jenkinsfile b/dm/pipeline/project/Jenkinsfile deleted file mode 100644 index cd7bd8ce379..00000000000 --- a/dm/pipeline/project/Jenkinsfile +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env groovy - - -// requires "Pipeline Utility Steps" plugin -// requires "pipeline-vars" file to be setup inside jenkins user home dir - -def config_dir = "pipeline/project" // relative to ${cft_dir} - -def env = "~/pipeline-vars" -def repo = "https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit" -def branch = "pipeline" -def git_dir = "cft" -def cft_dir = "dm" - -pipeline { - agent any - stages { - stage('Checkout Repos') { - steps { - sh "rm -rf ${git_dir}" - sh "git clone ${repo} ${git_dir}" - sh "cd ${git_dir} && git checkout ${branch}" - } - } - stage('Initialize Deploy Stages') { - steps { - sh ". 
${env} && cd ${git_dir}/${cft_dir} && cft apply ${config_dir} --show-stages --format yaml> .stages.yaml" - script { - def graph = readYaml file: "${git_dir}/${cft_dir}/.stages.yaml" - def i = 1 - graph.each { stg -> - stage("stage-${i}") { - def config_list = [] - stg.each { conf -> - config_list.add(conf.source) - } - def configs = config_list.join(" ") - echo "Executing configs: ${configs}" - sh ". ${env} && cd ${git_dir}/${cft_dir} && cft apply ${configs}" - } - i++ - } - } - } - } - } -} diff --git a/dm/pipeline/project/project.yaml b/dm/pipeline/project/project.yaml deleted file mode 100644 index a4cd290c542..00000000000 --- a/dm/pipeline/project/project.yaml +++ /dev/null @@ -1,25 +0,0 @@ -name: sourced-cft-demo-0 -imports: - - path: templates/project/project.py - name: project.py -resources: - - name: {{env.CFT_CHILD_PROJECT}} - type: project.py - properties: - parent: - type: folder - id: {{env.CFT_ORGANIZATION_FOLDER_ID}} - billingAccountId: {{env.CFT_BILLING_ACCOUNT_ID}} - activateApis: - - compute.googleapis.com - - deploymentmanager.googleapis.com - - pubsub.googleapis.com - - container.googleapis.com - serviceAccounts: - - accountId: cft-demo-sa-0 - displayName: cft demo service account 0 - roles: - - roles/editor - - roles/viewer - networkAccess: true - usageExportBucket: false diff --git a/dm/pipeline/teardown/Jenkinsfile b/dm/pipeline/teardown/Jenkinsfile deleted file mode 100644 index bedfbd95a7d..00000000000 --- a/dm/pipeline/teardown/Jenkinsfile +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env groovy - - -// requires "Pipeline Utility Steps" plugin -// requires "pipeline-vars" file to be setup inside jenkins user home dir - -def config_dir = "pipeline/network pipeline/app" // relative to ${cft_dir} - -def env = "~/pipeline-vars" -def repo = "https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit" -def branch = "pipeline" -def git_dir = "cft" -def cft_dir = "dm" - -pipeline { - agent any - stages { - stage('Checkout Repos') { - steps { - sh 
"rm -rf ${git_dir}" - sh "git clone ${repo} ${git_dir}" - sh "cd cft && git checkout ${branch}" - } - } - stage('Initialize Deploy Stages') { - steps { - sh ". ${env} && cd ${git_dir}/${cft_dir} && cft delete ${config_dir} --show-stages --format yaml> .stages.yaml" - script { - def graph = readYaml file: "${git_dir}/${cft_dir}/.stages.yaml" - def i = 1 - graph.each { stg -> - stage("stage-${i}") { - def config_list = [] - stg.each { conf -> - config_list.add(conf.source) - } - def configs = config_list.join(" ") - echo "Executing configs: ${configs}" - sh ". ${env} && cd ${git_dir}/${cft_dir} && cft delete ${configs}" - } - i++ - } - } - } - } - } -} diff --git a/dm/pylintrc b/dm/pylintrc deleted file mode 100644 index 06fae4d176a..00000000000 --- a/dm/pylintrc +++ /dev/null @@ -1,408 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Add files or directories matching the regex patterns to the blacklist. The -# regex matches against base names, not paths. -ignore-patterns= - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - -# Use multiple processes to speed up Pylint. -jobs=1 - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist=numpy - -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. 
For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. This option is deprecated -# and it will be removed in Pylint 2.0. -optimize-ast=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -#enable= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". 
If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=long-suffix,standarderror-builtin,indexing-exception,delslice-method,unichr-builtin,dict-view-method,parameter-unpacking,unicode-builtin,cmp-builtin,intern-builtin,round-builtin,backtick,nonzero-method,xrange-builtin,coerce-method,raw_input-builtin,old-division,filter-builtin-not-iterating,old-octal-literal,input-builtin,map-builtin-not-iterating,buffer-builtin,basestring-builtin,zip-builtin-not-iterating,using-cmp-argument,unpacking-in-except,old-raise-syntax,coerce-builtin,dict-iter-method,hex-method,range-builtin-not-iterating,useless-suppression,cmp-method,print-statement,reduce-builtin,file-builtin,long-builtin,getslice-method,execfile-builtin,no-absolute-import,metaclass-assignment,oct-method,reload-builtin,import-star-module-level,suppressed-message,apply-builtin,raising-string,next-method-called,setslice-method,old-ne-operator,arguments-differ,wildcard-import,locally-disabled - - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". This option is deprecated -# and it will be removed in Pylint 2.0. -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). 
-evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - - -[BASIC] - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. -property-classes=abc.abstractproperty - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming hint 
for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^test_ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - - -[ELIF] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=80 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=y - -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma,dict-separator - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Number of spaces of indent required inside a hanging or continued line. 
-indent-after-paren=4 - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=10 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules= - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. 
-ignored-classes=optparse.Values,thread._local,_thread._local,matplotlib.cm,tensorflow.python,tensorflow,tensorflow.train.Example,RunOptions - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members=set_shape,np.float32 - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=(_+[a-zA-Z0-9_]*?$)|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,future.builtins - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=10 - -# Argument names that match this expression will be ignored. 
Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=30 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=100 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=10 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=0 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=optparse - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=enchant - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. 
Defaults to -# "Exception" -overgeneral-exceptions=Exception - diff --git a/dm/pytest.ini b/dm/pytest.ini deleted file mode 100644 index 676ed45cc25..00000000000 --- a/dm/pytest.ini +++ /dev/null @@ -1,4 +0,0 @@ -[pytest] -testpaths = tests -addopts = --cov=cloud_foundation_toolkit - diff --git a/dm/requirements/development.txt b/dm/requirements/development.txt deleted file mode 100644 index d1b8c25e884..00000000000 --- a/dm/requirements/development.txt +++ /dev/null @@ -1,8 +0,0 @@ -coverage -mock -pylint -pytest -pytest-cov -setuptools -yapf -jsonschema diff --git a/dm/requirements/install.txt b/dm/requirements/install.txt deleted file mode 100644 index 74c9740edf8..00000000000 --- a/dm/requirements/install.txt +++ /dev/null @@ -1,2 +0,0 @@ -jinja2 -networkx diff --git a/dm/requirements/prerequisites.txt b/dm/requirements/prerequisites.txt deleted file mode 100644 index 1a4adc9dd9b..00000000000 --- a/dm/requirements/prerequisites.txt +++ /dev/null @@ -1,3 +0,0 @@ -tox -virtualenv -wheel diff --git a/dm/setup.cfg b/dm/setup.cfg deleted file mode 100644 index b86fd100bc8..00000000000 --- a/dm/setup.cfg +++ /dev/null @@ -1,6 +0,0 @@ -[bdist_wheel] -universal = 1 - -[metadata] -description-file = README.md -license_file = ../LICENSE diff --git a/dm/setup.py b/dm/setup.py deleted file mode 100644 index 0616d373421..00000000000 --- a/dm/setup.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python - -from setuptools import find_packages -from setuptools import setup - - -def get_version(): - with open('VERSION') as f: - return f.readline().rstrip() - - -def get_install_requirements(): - with open('requirements/install.txt') as f: - return [l.strip() for l in f if l.strip() and not l.startswith('#')] - - -config = { - 'name': 'cloud-foundation-toolkit', - 'version': get_version(), - 'description': 'Cloud Foundation Toolkit', - 'author': 'Gustavo Baratto', - 'author_email': 'gbaratto@gmail.com', - 'url': 'https://github.com/GoogleCloudPlatform/deploymentmanager-sample', 
- 'packages': find_packages('src'), - 'package_dir': {'': 'src'}, - 'scripts': [ - 'src/cft', - 'src/cftenv' - ], - 'install_requires': get_install_requirements(), - 'include_package_data': True -} - - -setup(**config) diff --git a/dm/solutions/take5-demo/README.md b/dm/solutions/take5-demo/README.md deleted file mode 100644 index 97cfafd9172..00000000000 --- a/dm/solutions/take5-demo/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# Demo script for Deployment Manager - -This script is part of the Take5 demo for **Deployment Manager**. -This tutorial walks you through how to start with **Deployment Manager** -and how to use the **Cloud Foundation Toolkit**. - -The video will be published shortly. - -## Part 1 - Firewall rules - -```bash -# Clone the CFT Repo -git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit.git -cd cloud-foundation-toolkit/dm - -# Copy the Firewall config file -cp templates/firewall/examples/firewall.yaml my_firewall.yaml - -# Specifying the VPC for the Firewall rules -export VpcName=default - -# Edit my_firewall.yaml - Change the network name manually or via CLI: -sed -i "s//$VpcName/g" my_firewall.yaml - -# Enable the compute API to create firewalls, enable the deploymentmanager API to create deployments. 
-gcloud services enable compute.googleapis.com deploymentmanager.googleapis.com -gcloud deployment-manager deployments create my-first-firewalls --config my_firewall.yaml - -# Manually change the my_firewall.yaml and try out the changes -gcloud deployment-manager deployments update my-first-firewalls --config my_firewall.yaml - -# Clean up the deployment -gcloud deployment-manager deployments delete my-first-firewalls - -``` -## Part 2 - Project Factory with Shared-VPC - -```bash -# Prerequisites - Setting the environment specific values -export OrgID=518838582042 -export ProjectNumber=700306896797 -export BillingID=01BACD-32281D-31B750 -export ProjectUniqueNameH=take5-host-xt-1300 -export ProjectUniqueNameG=take5-host-xg-1300 -export ParentFolderID=1049237988874 - -# Enabling the required APIs and IAM permissions -gcloud services enable deploymentmanager.googleapis.com cloudresourcemanager.googleapis.com cloudbilling.googleapis.com iam.googleapis.com servicemanagement.googleapis.com -gcloud organizations add-iam-policy-binding $OrgID --member=serviceAccount:$ProjectNumber@cloudservices.gserviceaccount.com --role=roles/resourcemanager.projectCreator - -## Add @cloudservices.gserviceaccount.com to the billing account as Billing User MANUALLY - -cp templates/project/examples/project.yaml my_project.yaml - -# Edit my_firewall.yaml - Change the Org/Folder ID, BillingID, UniqueProjectName manually or change it via CLI: -sed -i "s//$ProjectUniqueNameH/g" my_project.yaml -sed -i "s/type: organization/type: folder/g" my_project.yaml -sed -i "s//$ParentFolderID/g" my_project.yaml -sed -i "s//$BillingID/g" my_project.yaml - -# Manual remove attachment to a shared VPC from my_project.yaml - -# Create the project -gcloud deployment-manager deployments create my-first-project --config my_project.yaml - -# add `sharedVPCHost: true` to my_project.yaml - -# Enable the Deployment Manager SA to attach projects to the shared VPC -gcloud organizations add-iam-policy-binding $OrgID 
--member=serviceAccount:$ProjectNumber@cloudservices.gserviceaccount.com --role=roles/compute.xpnAdmin - -# Update the project to a Shared-VPC host project -gcloud deployment-manager deployments update my-first-project --config my_project.yaml - -cp templates/network/examples/network.yaml my_network.yaml - -# Create the Shared-VPC and its subnets -gcloud deployment-manager deployments create my-first-network --config my_network.yaml --project $ProjectUniqueNameH - - -cp templates/project/examples/project.yaml my_guest_project.yaml - - -# nano my_guest_project.yaml - Change the network name and other values manually or change it via CLI: -sed -i "s//$ProjectUniqueNameG/g" my_guest_project.yaml -sed -i "s/type: organization/type: folder/g" my_guest_project.yaml -sed -i "s//$ParentFolderID/g" my_guest_project.yaml -sed -i "s//$BillingID/g" my_guest_project.yaml -sed -i "s/test-vpc-host-project/$ProjectUniqueNameH/g" my_guest_project.yaml -sed -i "s/subnet-1/test-subnetwork-1/g" my_guest_project.yaml - -# Create the guest project and attach it to the host project -gcloud deployment-manager deployments create my-guest-project --config my_guest_project.yaml -``` - -## Note - -- Some templates are updated since the recording of the video, there are more detailed examples - available for the project template \ No newline at end of file diff --git a/dm/src/cft b/dm/src/cft deleted file mode 100755 index a60e84cac42..00000000000 --- a/dm/src/cft +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -# Copyright 2018 Google Inc. All Rights Reserved. -# - -# Finds the directory name for this script, and source the env -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" -source "$DIR/cftenv" - -# Executes cft -exec python -m "cloud_foundation_toolkit.cli" "$@" diff --git a/dm/src/cftenv b/dm/src/cftenv deleted file mode 100755 index 026a60c7527..00000000000 --- a/dm/src/cftenv +++ /dev/null @@ -1,124 +0,0 @@ -#!/bin/sh -# -# Copyright 2013 Google Inc. 
All Rights Reserved. -# - -# -# -# CLOUDSDK_ROOT_DIR (a) installation root dir -# CLOUDSDK_PYTHON (u) python interpreter path -# CLOUDSDK_PYTHON_ARGS (u) python interpreter arguments -# CLOUDSDK_PYTHON_SITEPACKAGES (u) use python site packages -# -# (a) always defined by the preamble -# (u) user definition overrides preamble - -# Determines the real cloud sdk root dir given the script path. -# Would be easier with a portable "readlink -f". -_cloudsdk_root_dir() { - case $1 in - /*) _cloudsdk_path=$1 - ;; - */*) _cloudsdk_path=$PWD/$1 - ;; - *) _cloudsdk_path=$(which "$1") - case $_cloudsdk_path in - /*) ;; - *) _cloudsdk_path=$PWD/$_cloudsdk_path ;; - esac - ;; - esac - _cloudsdk_dir=0 - while : - do - while _cloudsdk_link=$(readlink "$_cloudsdk_path") - do - case $_cloudsdk_link in - /*) _cloudsdk_path=$_cloudsdk_link ;; - *) _cloudsdk_path=$(dirname "$_cloudsdk_path")/$_cloudsdk_link ;; - esac - done - case $_cloudsdk_dir in - 1) break ;; - esac - if [ -d "${_cloudsdk_path}" ]; then - break - fi - _cloudsdk_dir=1 - _cloudsdk_path=$(dirname "$_cloudsdk_path") - done - while : - do case $_cloudsdk_path in - */) _cloudsdk_path=$(dirname "$_cloudsdk_path/.") - ;; - */.) _cloudsdk_path=$(dirname "$_cloudsdk_path") - ;; - */bin) dirname "$_cloudsdk_path" - break - ;; - *) echo "$_cloudsdk_path" - break - ;; - esac - done -} - -GCLOUD_PATH=$(which gcloud) -#CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0") -CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$GCLOUD_PATH") - -# if CLOUDSDK_PYTHON is empty -if [ -z "$CLOUDSDK_PYTHON" ]; then - # if python2 exists then plain python may point to a version != 2 - if which python2 >/dev/null; then - CLOUDSDK_PYTHON=python2 - elif which python2.7 >/dev/null; then - # this is what some OS X versions call their built-in Python - CLOUDSDK_PYTHON=python2.7 - elif which python >/dev/null; then - # Use unversioned python if it exists. 
- CLOUDSDK_PYTHON=python - elif which python3 >/dev/null; then - # We support python3, but only want to default to it if nothing else is - # found. - CLOUDSDK_PYTHON=python3 - else - # This won't work because it wasn't found above, but at this point this - # is our best guess for the error message. - CLOUDSDK_PYTHON=python - fi -fi - -# $PYTHONHOME can interfere with gcloud. Users should use -# CLOUDSDK_PYTHON to configure which python gcloud uses. -unset PYTHONHOME - -# if CLOUDSDK_PYTHON_SITEPACKAGES and VIRTUAL_ENV are empty -case :$CLOUDSDK_PYTHON_SITEPACKAGES:$VIRTUAL_ENV: in -:::) # add -S to CLOUDSDK_PYTHON_ARGS if not already there - case " $CLOUDSDK_PYTHON_ARGS " in - *" -S "*) ;; - " ") CLOUDSDK_PYTHON_ARGS="-S" - ;; - *) CLOUDSDK_PYTHON_ARGS="$CLOUDSDK_PYTHON_ARGS -S" - ;; - esac - unset CLOUDSDK_PYTHON_SITEPACKAGES - ;; -*) # remove -S from CLOUDSDK_PYTHON_ARGS if already there - while :; do - case " $CLOUDSDK_PYTHON_ARGS " in - *" -S "*) CLOUDSDK_PYTHON_ARGS=${CLOUDSDK_PYTHON_ARGS%%-S*}' '${CLOUDSDK_PYTHON_ARGS#*-S} ;; - *) break ;; - esac - done - # if CLOUDSDK_PYTHON_SITEPACKAGES is empty - [ -z "$CLOUDSDK_PYTHON_SITEPACKAGES" ] && - CLOUDSDK_PYTHON_SITEPACKAGES=1 - export CLOUDSDK_PYTHON_SITEPACKAGES - ;; -esac - -export CLOUDSDK_ROOT_DIR CLOUDSDK_PYTHON_ARGS -export PYTHONPATH="${CLOUDSDK_ROOT_DIR}/lib/third_party:${CLOUDSDK_ROOT_DIR}/lib" -# diff --git a/dm/src/cloud_foundation_toolkit/__init__.py b/dm/src/cloud_foundation_toolkit/__init__.py deleted file mode 100644 index 93b0ec325e7..00000000000 --- a/dm/src/cloud_foundation_toolkit/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -import logging -import pkg_resources - -from googlecloudsdk.core.credentials import store as creds_store - -# Setup logging and expose Logger object to the rest of the project -LOG = logging.getLogger("cft") -LOG.addHandler(logging.StreamHandler()) -LOG.propagate = False - -__VERSION__ = pkg_resources.get_distribution(__name__).version - -# Register credentials providers - 
for instance SA, etc -credential_providers = [ - creds_store.DevShellCredentialProvider(), - creds_store.GceCredentialProvider(), -] -for provider in credential_providers: - provider.Register() diff --git a/dm/src/cloud_foundation_toolkit/actions.py b/dm/src/cloud_foundation_toolkit/actions.py deleted file mode 100644 index caa47bebc50..00000000000 --- a/dm/src/cloud_foundation_toolkit/actions.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyr ight2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Deployment Actions """ - -import glob -import json -import os.path -import sys - -from apitools.base.py import exceptions as apitools_exceptions -from ruamel.yaml import YAML - -from cloud_foundation_toolkit import LOG -from cloud_foundation_toolkit.deployment import Config, ConfigGraph, Deployment - -# To avoid code repetition this ACTION_MAP is used to translate the -# args provided to the cmd line to the appropriate method of the -# deployment object -ACTION_MAP = { - 'apply': { - 'preview': 'preview' - }, - 'create': { - 'preview': 'preview' - }, - 'delete': {}, - 'update': { - 'preview': 'preview' - } -} - - -def check_file(config): - extensions = ['.yaml', '.yml', '.jinja'] - for ext in extensions: - if ext == config[-len(ext):]: - return True - - -def get_config_files(config): - """ Build a list of config files - List could have files directory or yaml strings - - Args(list): List of configs. 
Each item can be a file, a directory, - or a yaml string - - Returns: A list of config files or strings - """ - - config_files = [] - - for conf in config: - if os.path.isdir(conf): - config_files.extend( - [f for f in glob.glob(conf + '/*') if check_file(f)] - ) - else: - config_files.append(conf) - - LOG.debug('Config files %s', config_files) - return config_files - - -def execute(args): - action = args.action - - if action == 'delete' or (hasattr(args, 'reverse') and args.reverse): - graph = reversed( - ConfigGraph(get_config_files(args.config), - project=args.project) - ) - else: - graph = ConfigGraph(get_config_files(args.config), project=args.project) - - arguments = {} - for k, v in vars(args).items(): - if k in ACTION_MAP.get(action, {}): - arguments[ACTION_MAP[action][k]] = v - - LOG.debug( - 'Excuting %s on %s with arguments %s', - action, - args.config, - arguments - ) - - if args.show_stages: - output = [] - for level in graph: - configs = [] - for config in level: - configs.append( - { - 'project': config.project, - 'deployment': config.deployment, - 'source': config.source - } - ) - output.append(configs) - if args.format == 'yaml': - YAML().dump(output, sys.stdout) - elif args.format == 'json': - print(json.dumps(output, indent=2)) - else: - for i, stage in enumerate(output, start=1): - print('---------- Stage {} ----------'.format(i)) - for config in stage: - print( - ' - project: {}, deployment: {}, source: {}'.format( - config['project'], - config['deployment'], - config['source'] - ) - ) - print('------------------------------') - - else: - for i, stage in enumerate(graph, start=1): - print('---------- Stage {} ----------'.format(i)) - for config in stage: - LOG.debug('%s config %s', action, config.deployment) - deployment = Deployment(config) - method = getattr(deployment, action) - try: - method(**arguments) - except apitools_exceptions.HttpNotFoundError: - LOG.warn('Deployment %s does not exit', config.deployment) - if action != 'delete': - 
raise - print('------------------------------') diff --git a/dm/src/cloud_foundation_toolkit/cli.py b/dm/src/cloud_foundation_toolkit/cli.py deleted file mode 100755 index 6cdc5fc1773..00000000000 --- a/dm/src/cloud_foundation_toolkit/cli.py +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env python -# Copyright 2017 Gustavo Baratto. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Cloud Foundation Toolkit CLI """ - -from __future__ import print_function -import argparse -import sys - -from cloud_foundation_toolkit import __VERSION__ as CFT_VERSION -from cloud_foundation_toolkit import LOG -from cloud_foundation_toolkit.actions import execute - - -def build_common_args(parser): - """ Configures arguments to all actions/subparsers """ - - parser.add_argument( - 'config', - type=str, - nargs='+', - help='The path to the config files or directory' - ) - parser.add_argument( - '--show-stages', - '-s', - action='store_true', - default=False, - help=( - 'If specified, only displays the yaml representing the dependency ' - 'graph for the action' - ) - ) - parser.add_argument( - '--format', - '-f', - choices=['human', - 'yaml', - 'json'], - default='human', - help='The format of the output' - ) - - -def parse_args(args): - """parse CLI options """ - parser = argparse.ArgumentParser('cft') - - parser.add_argument( - '--version', - '-v', - action='version', - version=CFT_VERSION, - help='Print version information and exit' - ) - parser.add_argument( - 
'--project', - default=None, - help=( - 'The ID of the GCP project in which ALL config files will be ' - 'executed. This option will override the "project" directive in ' - 'the config files, so be careful when using this' - ) - ) - parser.add_argument('--verbosity', default='warning', help='The log level') - - # subparser for each action - subparser_obj = parser.add_subparsers(dest='action') - actions = ['apply', 'create', 'delete', 'update'] - - subparsers = {} - for action in actions: - subparsers[action] = subparser_obj.add_parser(action) - build_common_args(subparsers[action]) - - # action-specficic arguments - # - # create - subparsers['create'].add_argument( - '--preview', - '-p', - action='store_true', - default=False, - help='Preview changes' - ) - - # update - subparsers['update'].add_argument( - '--preview', - '-p', - action='store_true', - default=False, - help='Preview changes' - ) - - # upsert - subparsers['apply'].add_argument( - '--preview', - '-p', - action='store_true', - default=False, - help='Preview changes' - ) - subparsers['apply'].add_argument( - '--reverse', - '-r', - action='store_true', - default=False, - help='Whether to apply changes in reverse order' - ) - - return parser.parse_args(args) - - -def main(): - """ CLI entry point""" - - # Parse CLI arguments - args = parse_args(sys.argv[1:]) - - # logging - LOG.setLevel(args.verbosity.upper()) - execute(args) - - -if __name__ == '__main__': - main() diff --git a/dm/src/cloud_foundation_toolkit/deployment.py b/dm/src/cloud_foundation_toolkit/deployment.py deleted file mode 100644 index c19ce4c780a..00000000000 --- a/dm/src/cloud_foundation_toolkit/deployment.py +++ /dev/null @@ -1,753 +0,0 @@ -import base64 -from collections import namedtuple -import io -import os -import os.path -import re -from six.moves import input -import sys -import tempfile - -from apitools.base.py import exceptions as apitools_exceptions -from googlecloudsdk.api_lib.deployment_manager import dm_api_util -from 
googlecloudsdk.api_lib.deployment_manager import dm_base -from googlecloudsdk.api_lib.deployment_manager import exceptions as dm_exceptions -from googlecloudsdk.command_lib.deployment_manager import dm_util -from googlecloudsdk.command_lib.deployment_manager import dm_write -from googlecloudsdk.command_lib.deployment_manager import flags -from googlecloudsdk.command_lib.deployment_manager.importer import BuildConfig -from googlecloudsdk.command_lib.deployment_manager.importer import BuildTargetConfig -from googlecloudsdk.core.resource import resource_printer -from googlecloudsdk.third_party.apis.deploymentmanager.v2 import deploymentmanager_v2_messages as messages -import jinja2 -import networkx as nx - -from cloud_foundation_toolkit import LOG -from cloud_foundation_toolkit.dm_utils import DM_API -from cloud_foundation_toolkit.dm_utils import DM_OUTPUT_QUERY_REGEX -from cloud_foundation_toolkit.dm_utils import DMOutputQueryAttributes -from cloud_foundation_toolkit.dm_utils import get_deployment -from cloud_foundation_toolkit.dm_utils import get_deployment_output -from cloud_foundation_toolkit.dm_utils import parse_dm_output_url -from cloud_foundation_toolkit.dm_utils import parse_dm_output_token -from cloud_foundation_toolkit.yaml_utils import CFTBaseYAML - -Node = namedtuple('Node', ['project', 'deployment']) - - -def ask(): - """Function that asks for user input from stdin.""" - answer = input("Update(u), Skip (s), or Abort(a) Deployment? ") - while answer not in ['u', 's', 'a']: - answer = input("Update(u), Skip (s), or Abort(a) Deployment? ") - return answer - - -class Config(object): - """Class representing a CFT config. - - Attributes: - as_file (io.StringIO): A file-like interface to the - jinja-rendered config. - as_string (string): the jinja-rendered config. - id (string): A base64-encoded id representing the path or raw - content of the config. Could be used as a dict key. 
- source (string): The path or the raw content of config (obtained - by base64-decoding the 'id' attribute - """ - yaml = CFTBaseYAML() - - def __init__(self, item, project=None): - """ Contructor """ - - self.source = item - if project: - self._project = project - - if os.path.exists(item): - with io.open(item) as _fd: - self.as_string = jinja2.Template(_fd.read() - ).render(env=os.environ) - else: - self.as_string = jinja2.Template(item).render(env=os.environ) - - # YAML gets parsed twice: - # 1. Here, to figure out deployment name, project and dependency list. - # 2. When the Deployment() obj gets instantiated (to get the value of - # the output from the DM API) - # This approach takes more CPU, but it's less error prone than - # scanning the file ourselves. - self.as_dict = self.yaml.load(self.as_string) - - @property - def as_file(self): - return io.StringIO(self.as_string) - - @property - def id(self): - return Node(self.project, self.deployment) - - @property - def deployment(self): - return self.as_dict.get( - 'name', - os.path.basename(self.source).split('.')[0] - ) - - @property - def project(self): - """ Sets the project for the config - - This is a bit complicated but allows for quite a bit of - flexibility. 
The project can be defined in a few different - places, and this is the order on precedence: - - 1- Command line - 2- Config file - 3- CLOUD_FOUNDATION_PROJECT_ID environment variable - 4- The GCP SDK configuration - """ - if not hasattr(self, '_project'): - self._project = self.as_dict.get('project') or \ - os.environ.get('CLOUD_FOUNDATION_PROJECT_ID') or \ - dm_base.GetProject() - return self._project - - @property - def dependencies(self): - """ - """ - if hasattr(self, '_dependencies'): - return self._dependencies - - self._dependencies = set() - for line in self.as_file.readlines(): - # Ignore comments - if re.match(r'^\s*#', line): - continue - - # Match !DMOutput, $(out.x.y.w.z), etc tokens - for match in DM_OUTPUT_QUERY_REGEX.finditer(line): - for k, v in match.groupdict().items(): - if not v: - continue - if k == 'url': - url = parse_dm_output_url(v, self.project) - elif k == 'token': - url = parse_dm_output_token(v, self.project) - self._dependencies.add(Node(url.project, url.deployment)) - - return self._dependencies - - def __repr__(self): - return '{}({}:{})'.format(self.__class__, self.deployment, self.project) - - -class ConfigGraph(object): - """ Class representing the dependency graph between configs - - This is a container class holding the dependencies between configs. - An instance of this class be be used as an iterator over the - "levels" of dependencies. - - ``` - graph = ConfigGraph(["config-1.yaml", "config-2.yaml"]) - for level in graph: - for config in level: - deployment = Deployment(config) - ... - ``` - - Attributes: - graph(networkx.DiGraph): A networkx DiGraph() - roots(list): List of all root nodes in the graph - levels(list): List of dependency levels. Each element in the - list is another list of nodes that can be processed in - parallel. 
- - """ - - def __init__(self, configs, project=None): - """ Constructor """ - - # Populate the config dict - self.configs = { - c.id: c for c in (Config(x, - project=project) for x in configs) - } - - @property - def graph(self): - if hasattr(self, '_graph'): - return self._graph - self._graph = nx.DiGraph() - for _, config in self.configs.items(): - node = Node(config.project, config.deployment) - self._graph.add_node(node) - for dependency in config.dependencies: - self.graph.add_edge(dependency, node) - - if not nx.is_directed_acyclic_graph(self._graph): - raise SystemExit('Cyclic dependency in the graph') - return self._graph - - @property - def roots(self): - if not hasattr(self, '_roots'): - self._roots = [ - n for n in self.sort() if not list(self.graph.predecessors(n)) - ] - return self._roots - - @property - def levels(self): - if hasattr(self, '_levels'): - return self._levels - - graph = self.graph.copy() - remaining_nodes = list(self.sort()) - self._levels = [] - - while remaining_nodes: - level_nodes, level_configs = [], [] - - # Find the nodes in the level - for node in remaining_nodes: - if not nx.ancestors(graph, node): - level_nodes.append(node) - - # Find and load configs in the level - # If a node is not in a provided config, it must be a - # dependency, so we make sure it exists in DM, without - # attempting to load an unexisting config - for node in level_nodes: - remaining_nodes.remove(node) - graph.remove_node(node) - - if node in self.configs: - level_configs.append(self.configs[node]) - else: - deployment = get_deployment(node.project, node.deployment) - if not deployment: - raise SystemExit( - 'Unresolved dependency. Resource {}, on which' - 'other resources depended, neither was specified' - 'in the submitted congigs nor existed in' - 'Deployment Manager'.format(node) - ) - - if level_configs: - self._levels.append(level_configs) - - return self._levels - - def __iter__(self): - """ Makes this class an iterator. 
- - Notice the iterator over `self.levels` not `self` - """ - return iter(self.levels) - - def __reversed__(self): - """ Class can be iterated in reverse order. """ - return reversed(self.levels) - - def sort(self, reverse=False): - """ Sorts the graph in topological order. - - - Args: - reverse(boolean): Whether to return the nodes in reverse - order on not. - - Returns: A generator of nodes sorted by topology, ie the - elements are returned sequentially in order of dependency ( - independent nodes come first, unless 'reverse' is used. - """ - generator = nx.topological_sort(self.graph) - if reverse: - return reversed(list(generator)) - return generator - - -class Deployment(DM_API): - """Class representing a CFT deployment. - - This class makes extensive use of the Google Cloud SDK. Relevant files to - understand some of this code: - https://github.com/google-cloud-sdk/google-cloud-sdk/tree/master/lib/surface/deployment_manager/deployments - https://github.com/google-cloud-sdk/google-cloud-sdk/blob/master/lib/googlecloudsdk/third_party/apis/deploymentmanager/v2/deploymentmanager_v2_messages.py - https://github.com/google-cloud-sdk/google-cloud-sdk/blob/master/lib/googlecloudsdk/third_party/apis/deploymentmanager/v2/deploymentmanager_v2_client.py - - Attributes: - config(dict): A dict holding the config for this deployment. - current(Deployment): A Deployment object from the SDK, or None. - This attribute is None until self.get() called. If the - deployment doesn't exist in DM, it remains None. - dm_config(dict): A dict built from the CFT config holding keys - that DM can handle. - target_config(TargetConfiguration): A TargetConfiguration object from - the SDK. - """ - - # Number of seconds to wait for a create/update/delete operation - OPERATION_TIMEOUT = 20 * 60 # 20 mins. 
Same as gcloud - - # The keys required by a DM config (not CFT config) - DM_CONFIG_KEYS = ['imports', 'resources', 'outputs', 'configVersion'] - - def __init__(self, config): - """ The class constructor - - Args: - config_item (configItem): A dict representing CFT config. - Normally provided when creating/updating a deployment. - """ - - # Resolve custom yaml tags only during deployment instantiation - # because if parsed earlier, the DM queries implemented for the - # tags would likely fail with 404s - self.yaml = CFTBaseYAML() - self.yaml.Constructor.add_constructor( - '!DMOutput', - self.yaml_dm_output_constructor - ) - self._config = config - - # Regex search/replace before loading the yaml - self.config = self.yaml.load(config.as_string) - self.yaml_walk(self.config) - - self.config['project'] = self._config.project - self.config['name'] = self._config.deployment - - self.tmp_file_path = None - - LOG.debug('==> %s', self.config) - self.current = None - - def yaml_walk(self, yaml_tree): - """ Custom function for walking through the config and checking every string if its a regexp match - - In place walk over the config yaml. In case of a string togen it replaces the token with the complex - YAML value of the reference. - - The function is able to walk through lists and dictionarries. It ignores boolm, int and double values. 
- """ - if isinstance(yaml_tree, dict): - for k, v in yaml_tree.items(): ## Walk each element in dictionary - yaml_tree[k] = self.yaml_replace(v) - elif isinstance(yaml_tree, list): - for i, v in enumerate(yaml_tree): ## Walk each element in list - yaml_tree[i] = self.yaml_replace(v) - - def yaml_replace(self, v): - if isinstance(v, str): - match = DM_OUTPUT_QUERY_REGEX.match(v) - if match is not None: - return self.get_dm_output(match) - else: - self.yaml_walk(v) ## Not string, recursive walk - return v - - def get_dm_output(self, match): - """ Custom function for the regex.match() - - This function gets executed everytime there's a match on one - tokens used to represent the cross-deployment references ( - !DMOutput, $(out.x.y.w.z), etc. - - Args: - match (re.MatchObject): A regex matche object - - Returns: A string with the value of the deployment output - """ - - for k, v in match.groupdict().items(): - if not v: - continue - if k == 'url': - query_attributes = parse_dm_output_url(v, self._config.project) - elif k == 'token': - query_attributes = parse_dm_output_token( - v, - self._config.project - ) - return get_deployment_output( - query_attributes.project, - query_attributes.deployment, - query_attributes.resource, - query_attributes.name - ) - - def yaml_dm_output_constructor(self, loader, node): - """ Implements the !DMOutput yaml tag - - The tag takes string represeting an DM item URL. - - Example: - network: !DMOutput dm://${project}/${deployment}/${resource}/${name} - """ - - data = loader.construct_scalar(node) - url = parse_dm_output_url(data, self._config.project) - return get_deployment_output( - url.project, - url.deployment, - url.resource, - url.name - ) - - @property - def dm_config(self): - """Returns a dict with keys that DM can handle. - - Args: - - Return: A dict representing a valid DM config (not CFT config) - - TODO (gus): Could a dictview be used here? 
- """ - - return { - k: v for k, - v in self.config.items() if k in self.DM_CONFIG_KEYS - } - - @property - def target_config(self): - """Returns the 'target config' for the deployment. - - The 'import code' is very complex and error prone. Instead - of rewriting it here, the code from the SDK/gcloud is being - reused. - The SDK code only works with actual files, not strings, so - the processed configs to are written to temporary files then - fed to the SDK code to handle the imports. - - Args: - - Returns: None - """ - self.write_tmp_file() - target = BuildTargetConfig(messages, config=self.tmp_file_path) - self.delete_tmp_file() - return target - - def write_tmp_file(self): - """ Writes the yaml dump of the deployment to a temp file. - - This temporary file is always created in the current directory, - not in the directory where the config file is. - - Args: - - Returns: None - """ - - with tempfile.NamedTemporaryFile(dir=os.getcwd(), delete=False) as tmp: - self.yaml.dump(self.dm_config, tmp) - self.tmp_file_path = tmp.name - - def delete_tmp_file(self): - """ Delete the temporary config file """ - - os.remove(self.tmp_file_path) - - def get(self): - """ Returns a Deployment() message(obj) from the DM API. - - Shortcut to deployments.Get() that doesn't raise an exception - When deployment doesn't exit. - - This method also updates the 'current' attribute with the latest - data from the DM API. - - Args: - - Returns: A Deployment object from the SDK or None - """ - - self.current = get_deployment( - project=self.config['project'], - deployment=self.config['name'] - ) - return self.current - - def delete(self, delete_policy=None): - """Deletes this deployment from DM. - - Args: - delete_policy (str): The strings 'ABANDON' or 'DELETE'. - The default (None), doesn't include the policy in the - request obj, which translates 'DELETE' as default. 
- - Returns: None - """ - - message = self.messages.DeploymentmanagerDeploymentsDeleteRequest - request = message( - deployment=self.config['name'], - project=self.config['project'] - ) - - if delete_policy: - request['deletePolicy' - ] = message.DeletePolicyValueValuesEnum(delete_policy) - - LOG.debug('Deleting deployment %', self.config['name'], request) - - # The actual operation. - # No exception handling is done here to allow higher level - # functions to do so. - operation = self.client.deployments.Delete(request) - - # Wait for operation to finish - self.wait(operation) - - def create(self, preview=False, create_policy=None): - """Creates this deployment in DM. - - Args: - preview (boolean): If True, create is done with preview. - create_policy (str): The strings 'ACQUIRE' or 'CREATE_OR_ACQUIRE'. - The default (None), doesn't include the policy in the - request obj, which translates 'CREATE_OR_ACQUIRE' as default. - - Returns: None - """ - - deployment = self.messages.Deployment( - name=self.config['name'], - target=self.target_config - ) - - message = self.messages.DeploymentmanagerDeploymentsInsertRequest - request = message( - deployment=deployment, - project=self.config['project'], - preview=preview - ) - if create_policy: - request['createPolicy' - ] = message.CreatePolicyValueValuesEnum(create_policy) - LOG.debug( - 'Creating deployment %s with data %s', - self.config['name'], - request - ) - - # The actual operation. - # No exception handling is done here to allow higher level - # functions to do so. - operation = self.client.deployments.Insert(request) - - # Wait for operation to finish - self.wait(operation) - self.print_resources_and_outputs() - return self.current - - -# -# if preview: -# func = self.confirm_preview() -# func() -# elif getattr(self.current, 'update', False): -# self.update_preview() -# - - def update(self, preview=False, create_policy=None, delete_policy=None): - """Updates this deployment in DM. 
- - If the deployment is already in preview mode in DM, the existing - preview operation will be overwritten by this one. - - Args: - preview (boolean): If True, update is done with preview. - create_policy (str): The strings 'ACQUIRE' or 'CREATE_OR_ACQUIRE'. - The default (None), doesn't include the policy in the - request obj, which translates 'CREATE_OR_ACQUIRE' as default. - delete_policy (str): The strings 'ABANDON' or 'DELETE'. - The default (None), doesn't include the policy in the - request obj, which translates 'DELETE' as default. - - Returns: None - """ - - # Get current deployment to figure out the fingerprint - self.get() - if not self.current: - raise SystemExit( - 'Error updating {}: Deployment does not exist'.format( - self.config['name'] - ) - ) - - new_deployment = self.messages.Deployment( - name=self.config['name'], - target=self.target_config, - fingerprint=self.current.fingerprint or b'' - ) - - message = self.messages.DeploymentmanagerDeploymentsUpdateRequest - - # getattr() below overwrites existing preview mode as targets - # cannot be sent when deployment is already in preview mode - request = message( - deployment=self.config['name'], - deploymentResource=new_deployment, - project=self.config['project'], - preview=preview or bool(getattr(self.current, - 'update', - False)) - ) - if delete_policy: - request['deletePolicy' - ] = message.DeletePolicyValueValuesEnum(delete_policy) - if create_policy: - request['createPolicy' - ] = message.CreatePolicyValueValuesEnum(create_policy) - - LOG.debug( - 'Updating deployment %s with data %s', - self.config['name'], - request - ) - - # The actual operation. - # No exception handling is done here to allow higher level - # functions to do so. 
- operation = self.client.deployments.Update(request) - - # Wait for operation to finish - self.wait(operation) - - self.print_resources_and_outputs() - - if preview: - func = self.confirm_preview() - func() - elif getattr(self.current, 'update', False): - self.update_preview() - - def confirm_preview(self): - answer = ask() - - if answer == 'u': - return self.update_preview - elif answer == 's': - return self.cancel_preview - elif answer == 'a': - raise SystemExit('Aborting deployment run!') - else: - raise SystemExit('Not a valid answer: {}'.format(answer)) - - def update_preview(self): - """Confirms an update preview. - - The request to the API doesn't include the target - - Args: - - Returns: - """ - deployment = self.messages.Deployment( - name=self.config['name'], - fingerprint=self.current.fingerprint or b'' - ) - request = self.messages.DeploymentmanagerDeploymentsUpdateRequest( - deployment=self.config['name'], - deploymentResource=deployment, - project=self.config['project'], - preview=False - ) - operation = self.client.deployments.Update(request) - self.wait(operation, 'update preview') - self.print_resources_and_outputs() - - def wait(self, operation, action=None, get=True): - """Waits for a DM operation to be completed. - - Args: - operation (Operation): An Operation object from the SDK. - action (string): Any operation name to be used in the - ticker. If not specified, the operation type is used. - get (boolean): wether to retrieve the latest deployment - info from the API to obtain the current fingerprint. 
- """ - # This saves an API call if the self.get() was called just - # before calling this method - if get: - self.get() - - action = action or operation.operationType - - dm_write.WaitForOperation( - self.client, - self.messages, - operation.name, - project=self.config['project'], - timeout=self.OPERATION_TIMEOUT, - operation_description='{} {} (fingerprint {})'.format( - action, - self.config['name'], - base64.urlsafe_b64encode(self.current.fingerprint) - ) - ) - return self.get() - - def cancel_preview(self): - """Cancels a deployment preview. - - If a deployment is in preview mode, the update is cancelled and - no resourced are changed - - Args: - - Returns: - """ - cancel_msg = self.messages.DeploymentsCancelPreviewRequest( - fingerprint=self.current.fingerprint or b'' - ) - req = self.messages.DeploymentmanagerDeploymentsCancelPreviewRequest( - deployment=self.config['name'], - deploymentsCancelPreviewRequest=cancel_msg, - project=self.config['project'] - ) - operation = self.client.deployments.CancelPreview(req) - self.wait(operation) - - def apply(self, preview=False, create_policy=None, delete_policy=None): - """Creates or updates this deployment in DM. - - Args: - preview (boolean): If True, update is done with preview. - create_policy (str): The strings 'ACQUIRE' or 'CREATE_OR_ACQUIRE'. - The default (None), doesn't include the policy in the - request obj, which translates 'CREATE_OR_ACQUIRE' as default. - delete_policy (str): The strings 'ABANDON' or 'DELETE'. - The default (None), doesn't include the policy in the - request obj, which translates 'DELETE' as default. 
- - Returns: None - """ - try: - self.create() - except apitools_exceptions.HttpConflictError as err: - self.update(preview=preview) - - def print_resources_and_outputs(self): - """Prints the Resources and Outputs of this deployment.""" - - rsp = dm_api_util.FetchResourcesAndOutputs( - self.client, - self.messages, - self.config['project'], - self.config['name'], - # self.ReleaseTrack() is base.ReleaseTrack.ALPHA - ) - - printer = resource_printer.Printer(flags.RESOURCES_AND_OUTPUTS_FORMAT) - printer.AddRecord(rsp) - printer.Finish() - return rsp diff --git a/dm/src/cloud_foundation_toolkit/dm_utils.py b/dm/src/cloud_foundation_toolkit/dm_utils.py deleted file mode 100644 index 01ab2348f63..00000000000 --- a/dm/src/cloud_foundation_toolkit/dm_utils.py +++ /dev/null @@ -1,118 +0,0 @@ -from collections import namedtuple -import io -import re -from six.moves.urllib.parse import urlparse - -from apitools.base.py import exceptions as apitools_exceptions -from googlecloudsdk.api_lib.deployment_manager import dm_base -from ruamel.yaml import YAML - -DM_OUTPUT_QUERY_REGEX = re.compile( - r'!DMOutput\s+(?P\bdm://[-/a-zA-Z0-9]+\b)|' - r'\$\(out\.(?P[-.a-zA-Z0-9]+)\)' -) - -DMOutputQueryAttributes = namedtuple( - 'DMOutputQueryAttributes', - ['project', - 'deployment', - 'resource', - 'name'] -) - - -@dm_base.UseDmApi(dm_base.DmApiVersion.V2) -class DM_API(dm_base.DmCommand): - """ Class representing the DM API - - This a proxy class only, so other modules in this project - only import this local class instead of gcloud's. 
Here's the source: - - https://github.com/google-cloud-sdk/google-cloud-sdk/blob/master/lib/googlecloudsdk/api_lib/deployment_manager/dm_base.py - """ - - -API = DM_API() - - -def get_deployment(project, deployment): - try: - return API.client.deployments.Get( - API.messages.DeploymentmanagerDeploymentsGetRequest( - project=project, - deployment=deployment - ) - ) - except apitools_exceptions.HttpNotFoundError as _: - return None - - -def get_manifest(project, deployment): - deployment_rsp = get_deployment(project, deployment) - - return API.client.manifests.Get( - API.messages.DeploymentmanagerManifestsGetRequest( - project=project, - deployment=deployment, - manifest=deployment_rsp.manifest.split('/')[-1] - ) - ) - - -def parse_dm_output_url(url, project=''): - error_msg = ( - 'The url must look like ' - '"dm://${project}/${deployment}/${resource}/${name}" or' - '"dm://${deployment}/${resource}/${name}"' - ) - parsed_url = urlparse(url) - if parsed_url.scheme != 'dm': - raise ValueError(error_msg) - path = parsed_url.path.split('/')[1:] - - # path == 2 if project isn't specified in the URL - # path == 3 if project is specified in the URL - if len(path) == 2: - args = [project] + [parsed_url.netloc] + path - elif len(path) == 3: - args = [parsed_url.netloc] + path - else: - raise ValueError(error_msg) - - return DMOutputQueryAttributes(*args) - - -def parse_dm_output_token(token, project=''): - error_msg = ( - 'The url must look like ' - '$(out.${project}.${deployment}.${resource}.${name}" or ' - '$(out.${deployment}.${resource}.${name}"' - ) - parts = token.split('.') - - # parts == 3 if project isn't specified in the token - # parts == 4 if project is specified in the token - if len(parts) == 3: - return DMOutputQueryAttributes(project, *parts) - elif len(parts) == 4: - return DMOutputQueryAttributes(*parts) - else: - raise ValueError(error_msg) - -def get_deployment_output(project, deployment, resource, name): - manifest = get_manifest(project, deployment) - 
layout = YAML().load(manifest.layout) - return traverse_resource_output(layout, resource, name) - -def traverse_resource_output(layout, resource, name): - for _resource in layout.get('resources', []): - if _resource['name'] == resource: - for output in _resource.get('outputs', []): - if output['name'] == name: - return output['finalValue'] - - #recursive traversal of complex resources to search for outputs. - output = traverse_resource_output(_resource, resource, name) - if output != []: - return output - return [] diff --git a/dm/src/cloud_foundation_toolkit/yaml_utils.py b/dm/src/cloud_foundation_toolkit/yaml_utils.py deleted file mode 100644 index 1dd28867889..00000000000 --- a/dm/src/cloud_foundation_toolkit/yaml_utils.py +++ /dev/null @@ -1,14 +0,0 @@ -from ruamel.yaml import YAML -from ruamel.yaml.compat import StringIO - - -class CFTBaseYAML(YAML): - - def dump(self, data, stream=None, **kwargs): - inefficient = False - if stream is None: - inefficient = True - stream = StringIO() - YAML.dump(self, data, stream, **kwargs) - if inefficient: - return stream.getvalue() diff --git a/dm/templates/README.md b/dm/templates/README.md deleted file mode 100644 index 270907c0951..00000000000 --- a/dm/templates/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# CFT Templates - -This folder contains the library of templates included in the Cloud Foundation -toolkit (henceforth, CFT). - -## Overview - -Each template is stored in a folder named after the templated cloud resource; -e.g., "network", "cloud_router", "healthcheck", etc. Each template folder contains: - -- README.md - a textual description of the template's usage, prerequisites, etc. 
-- `resource`.py - the Python 2.7 template file -- `resource`.py.schema - the schema file associated with the template -- examples: - - `resource`.yaml - a sample config file that utilizes the template -- tests: - - integration: - - `resource`.yaml - a test config file - - `resource`.bats - a bats test harness for the test config - -## Usage - -You can use the templates included in the template library: - -- Via Google's Deployment Manager / gcloud as described in the - [Google SDK documentation](https://cloud.google.com/sdk/) -- Via the `CFT`, as described in the [CFT User Guide](../docs/userguide.md) - -You can use the templates "as is," and/or modify them to suit your needs, as -well as create new ones. Instructions and recommendations for template -development are in the -[Template Developer Guide](../docs/template_dev_guide.md). \ No newline at end of file diff --git a/dm/templates/autoscaler/README.md b/dm/templates/autoscaler/README.md deleted file mode 100644 index 4f9f514502f..00000000000 --- a/dm/templates/autoscaler/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# Autoscaler - -This template creates an autoscaler. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.admin](https://cloud.google.com/compute/docs/access/iam) -IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [compute.v1.autoscaler](https://cloud.google.com/compute/docs/reference/latest/autoscalers) -- [compute.v1.regionalAutoscaler](https://cloud.google.com/compute/docs/reference/latest/regionAutoscalers) - -### Properties - -See the `properties` section in the schema file(s): - -- [Autoscaler](autoscaler.py.schema) - -### Usage - -1. 
Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment, in this - case [examples/autoscaler\_zonal.yaml](examples/autoscaler_zonal.yaml) - -```shell - cp templates/autoscaler/examples/autoscaler_zonal.yaml my_autoscaler.yaml -``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - -```shell - vim my_autoscaler.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment as described below, replacing - with your with your own deployment name - -```shell - gcloud deployment-manager deployments create \ - --config my_autoscaler.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Zonal autoscaler](examples/autoscaler_zonal.yaml) -- [Regional autoscaler](examples/autoscaler_regional.yaml) diff --git a/dm/templates/autoscaler/autoscaler.py b/dm/templates/autoscaler/autoscaler.py deleted file mode 100644 index 713c3452153..00000000000 --- a/dm/templates/autoscaler/autoscaler.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates an autoscaler. """ - -REGIONAL_LOCAL_AUTOSCALER_TYPES = { - # https://cloud.google.com/compute/docs/reference/rest/v1/regionAutoscalers - True: 'gcp-types/compute-v1:regionAutoscalers', - # https://cloud.google.com/compute/docs/reference/rest/v1/autoscalers - False: 'gcp-types/compute-v1:autoscalers' -} - -def set_optional_property(receiver, source, property_name): - """ If set, copies the given property value from one object to another. """ - - if property_name in source: - receiver[property_name] = source[property_name] - -def set_autoscaler_location(autoscaler, is_regional, location): - """ Sets location-dependent properties of the autoscaler. """ - - name = autoscaler['name'] - location_prop_name = 'region' if is_regional else 'zone' - - autoscaler['type'] = REGIONAL_LOCAL_AUTOSCALER_TYPES[is_regional] - autoscaler['properties'][location_prop_name] = location - location_output = { - 'name': location_prop_name, - 'value': '$(ref.{}.{})'.format(name, location_prop_name) - } - - return location_output - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - name = properties.get('name', context.env['name']) - project = properties.get('project', context.env['project']) - target = properties['target'] - - policy = {} - - autoscaler = { - 'type': None, # Will be set up at a later stage. 
- 'name': context.env['name'], - 'properties': { - 'name': name, - 'project': project, - 'autoscalingPolicy': policy, - 'target': target - } - } - - policy_props = ['coolDownPeriodSec', - 'minNumReplicas', - 'maxNumReplicas', - 'customMetricUtilizations', - 'loadBalancingUtilization', - 'cpuUtilization'] - - for prop in policy_props: - set_optional_property(policy, properties, prop) - - is_regional = 'region' in properties - location = properties['region'] if is_regional else properties['zone'] - location_output = set_autoscaler_location(autoscaler, is_regional, location) - - set_optional_property(autoscaler['properties'], properties, 'description') - - return { - 'resources': [autoscaler], - 'outputs': [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - } - ] + [location_output] - } diff --git a/dm/templates/autoscaler/autoscaler.py.schema b/dm/templates/autoscaler/autoscaler.py.schema deleted file mode 100644 index 13f78184b76..00000000000 --- a/dm/templates/autoscaler/autoscaler.py.schema +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Autoscaler - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates an autoscaler. 
- - For more information on this resource: - https://cloud.google.com/compute/docs/autoscaler/ - - APIs endpoints used by this template: - - gcp-types/compute-v1:autoscalers => - https://cloud.google.com/compute/docs/reference/rest/v1/autoscalers - - gcp-types/compute-v1:regionAutoscalers => - https://cloud.google.com/compute/docs/reference/rest/v1/regionAutoscalers - - -additionalProperties: false - -required: - - maxNumReplicas - - target - -anyOf: - - required: - - cpuUtilization - - required: - - loadBalancingUtilization - - required: - - customMetricUtilizations - -oneOf: - - required: - - region - - required: - - zone - -properties: - name: - type: string - description: The function name. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the autoscaler. - description: - type: string - description: The resource description. - zone: - type: string - description: | - The URL of the zone where the instance group resides (for autoscalers with the - zonal scope). - region: - type: string - description: | - The URL of the region where the instance group resides (for autoscalers with the - regional scope). - target: - type: string - description: | - The URL of the managed instance group the autoscaler is associated with. - coolDownPeriodSec: - type: integer - default: 60 - description: | - The number of seconds the autoscaler must wait before it starts - collecting information from a new instance. - minNumReplicas: - type: integer - default: 1 - minimum: 0 - description: | - The minimum number of replicas the autoscaler can scale down to. - maxNumReplicas: - type: integer - description: | - The maximum number of replicas the autoscaler can scale up to. - cpuUtilization: - type: object - additionalProperties: false - description: | - Defines the CPU utilization policy that allows the autoscaler to - scale based on the average CPU utilization of a managed instance group. 
- required: - - utilizationTarget - properties: - utilizationTarget: - type: number - minimum: 0 - maximum: 1 - description: | - The CPU utilization the autoscaler must maintain (as a target value). - loadBalancingUtilization: - type: object - additionalProperties: false - required: - - utilizationTarget - description: | - Configuration parameters for autoscaling based on the load balancer. - properties: - utilizationTarget: - type: number - minimum: 0 - maximum: 1 - description: The fraction of the back-end capacity utilization. - customMetricUtilizations: - type: array - description: | - Configuration parameters for autoscaling based on a custom metric. - items: - type: object - additionalProperties: false - required: - - metric - - utilizationTarget - - utilizationTargetType - properties: - metric: - type: string - description: | - The identifier (type) of the Stackdriver Monitoring metric. - utilizationTarget: - type: number - description: | - The value of the metric the autoscaler must maintain (as a target). - This must be a positive value. - utilizationTargetType: - type: string - default: GAUGE - enum: - - GAUGE - - DELTA_PER_SECOND - - DELTA_PER_MINUTE - description: | - The option that defines how the target utilization value - of the Stackdriver Monitoring metric is expressed. - -outputs: - name: - type: string - description: The autoscaler name. - selfLink: - type: string - description: The autoscaler URL. - region: - type: string - description: | - The region where the instance group resides (for regionally - scoped autoscalers). - zone: - type: string - description: | - The zone where the instance group resides (for zonally scoped - autoscalers). 
- -documentation: - - templates/autoscaler/README.md - -examples: - - templates/autoscaler/examples/autoscaler_zonal.yaml - - templates/autoscaler/examples/autoscaler_regional.yaml diff --git a/dm/templates/autoscaler/examples/autoscaler_regional.yaml b/dm/templates/autoscaler/examples/autoscaler_regional.yaml deleted file mode 100644 index 6cdb2d4cfaf..00000000000 --- a/dm/templates/autoscaler/examples/autoscaler_regional.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Example of the autoscaleer template usage. -# -# In this example, a regional autoscaler with -# multiple autoscaling rules is created. -# -# Replace the following with valid values: -# : a valid link to an -# existing regional instanceGroupManager -# : a region where the instanceGroupManager resides - -imports: - - path: templates/autoscaler/autoscaler.py - name: autoscaler.py - -resources: - - name: regional-autoscaler - type: autoscaler.py - properties: - region: - cpuUtilization: - utilizationTarget: 0.7 - customMetricUtilizations: - - metric: compute.googleapis.com/instance/disk/read_ops_count - utilizationTarget: 1000 - utilizationTargetType: DELTA_PER_SECOND - maxNumReplicas: 4 - target: diff --git a/dm/templates/autoscaler/examples/autoscaler_zonal.yaml b/dm/templates/autoscaler/examples/autoscaler_zonal.yaml deleted file mode 100644 index 52b868b6333..00000000000 --- a/dm/templates/autoscaler/examples/autoscaler_zonal.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Example of the autoscaleer template usage. -# -# In this example, a zonal autoscaler with multiple -# autoscaling rules is created. 
-# -# Replace the following with valid values: -# : a valid link to an existing -# zonal instanceGroupManager -# : a name of the zone where the instanceGroupManager -# resides - -imports: - - path: templates/autoscaler/autoscaler.py - name: autoscaler.py - -resources: - - name: zonal-autoscaler - type: autoscaler.py - properties: - zone: - cpuUtilization: - utilizationTarget: 0.7 - customMetricUtilizations: - - metric: compute.googleapis.com/instance/disk/read_ops_count - utilizationTarget: 1000 - utilizationTargetType: DELTA_PER_SECOND - maxNumReplicas: 4 - target: diff --git a/dm/templates/autoscaler/tests/integration/autoscaler.bats b/dm/templates/autoscaler/tests/integration/autoscaler.bats deleted file mode 100755 index d3c90518c00..00000000000 --- a/dm/templates/autoscaler/tests/integration/autoscaler.bats +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # test specific variables - export REGION="us-east1" - export CPU_UTILIZATION_1="0.7" - export CUSTOM_METRIC="compute.googleapis.com/instance/disk/read_ops_count" - export CUSTOM_METRIC_TARGET="1000" - export CUSTOM_METRIC_TYPE="DELTA_PER_SECOND" - export NUM_REPLICAS="2" - export CPU_UTILIZATION_2="0.6" - export COOL_DOWN_PERIOD="70" - export ZONE="us-central1-c" - export DESCRIPTION="descr" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - envsubst < "templates/autoscaler/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying that zonal autoscaler properties were set" { - run gcloud compute instance-groups managed describe "zonal-igm-${RAND}" \ - --format "yaml(autoscaler)" --zone "${ZONE}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "zonal-autoscaler-${RAND}" ]] - [[ "$output" =~ "coolDownPeriodSec: ${COOL_DOWN_PERIOD}" ]] - [[ "$output" =~ "utilizationTarget: ${CPU_UTILIZATION_2}" ]] - [[ "$output" =~ "description: ${DESCRIPTION}" ]] -} - -@test "Verifying that regional autoscaler properties were set" { - run gcloud compute instance-groups managed describe "regional-igm-${RAND}" \ - --format "yaml(autoscaler)" --region "${REGION}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "utilizationTarget: ${CPU_UTILIZATION_1}" ]] - [[ "$output" =~ "utilizationTarget: ${CUSTOM_METRIC_TARGET}.0" ]] - [[ "$output" =~ "maxNumReplicas: ${NUM_REPLICAS}" ]] - [[ "$output" =~ "minNumReplicas: 1" ]] # default - [[ "$output" =~ "metric: ${CUSTOM_METRIC}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/autoscaler/tests/integration/autoscaler.yaml b/dm/templates/autoscaler/tests/integration/autoscaler.yaml deleted file mode 100644 index feb31f83ba2..00000000000 --- a/dm/templates/autoscaler/tests/integration/autoscaler.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# Test of the autoscaler template. 
-# - -imports: - - path: templates/autoscaler/autoscaler.py - name: autoscaler.py - -resources: - - name: regional-autoscaler-${RAND} - type: autoscaler.py - properties: - region: ${REGION} - cpuUtilization: - utilizationTarget: ${CPU_UTILIZATION_1} - customMetricUtilizations: - - metric: ${CUSTOM_METRIC} - utilizationTarget: ${CUSTOM_METRIC_TARGET} - utilizationTargetType: ${CUSTOM_METRIC_TYPE} - maxNumReplicas: ${NUM_REPLICAS} - target: $(ref.regional-igm-${RAND}.selfLink) - - - name: zonal-autoscaler-${RAND} - type: autoscaler.py - properties: - zone: ${ZONE} - description: ${DESCRIPTION} - coolDownPeriodSec: ${COOL_DOWN_PERIOD} - cpuUtilization: - utilizationTarget: ${CPU_UTILIZATION_2} - maxNumReplicas: ${NUM_REPLICAS} - target: $(ref.zonal-igm-${RAND}.selfLink) - -# Test prerequisites: two instance group managers, an instance template, -# and a network - - name: zonal-igm-${RAND} - type: compute.v1.instanceGroupManager - properties: - instanceTemplate: $(ref.instance-template-${RAND}.selfLink) - zone: ${ZONE} - targetSize: 1 - - - name: regional-igm-${RAND} - type: compute.v1.regionInstanceGroupManager - properties: - instanceTemplate: $(ref.instance-template-${RAND}.selfLink) - region: ${REGION} - targetSize: 1 - - - name: instance-template-${RAND} - type: compute.v1.instanceTemplate - properties: - properties: - machineType: f1-micro - disks: - - autoDelete: true - boot: true - deviceName: boot - initializeParams: - sourceImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - type: PERSISTENT - networkInterfaces: - - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - network: $(ref.test-network-${RAND}.selfLink) - - - name: test-network-${RAND} - type: compute.v1.network - properties: - autoCreateSubnetworks: true diff --git a/dm/templates/backend_service/README.md b/dm/templates/backend_service/README.md deleted file mode 100644 index 8c080e449ee..00000000000 --- a/dm/templates/backend_service/README.md +++ /dev/null @@ -1,70 
+0,0 @@ -# Backend Service - -This template creates a backend service. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.admin](https://cloud.google.com/compute/docs/access/iam) -IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [compute.v1.backendService](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) -- [compute.v1.regionalBackendService](https://cloud.google.com/compute/docs/reference/latest/regionBackendServices) - -### Properties - -See the `properties` section in the schema file(s): -- [Backend Service](backend_service.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this -case, [examples/backend\_service\_regional.yaml](examples/backend_service_regional.yaml): - -```shell - cp templates/backend_service/examples/backend_service_regional.yaml \ - my_backend_service.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for -properties, refer to the schema files listed above): - -```shell - vim my_backend_service.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant -deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_backend_service.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Regional Backend Service](examples/backend_service_regional.yaml) -- [Global Backend Service](examples/backend_service_global.yaml) diff --git a/dm/templates/backend_service/backend_service.py b/dm/templates/backend_service/backend_service.py deleted file mode 100644 index b3df580582b..00000000000 --- a/dm/templates/backend_service/backend_service.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a backend service. """ - -REGIONAL_GLOBAL_TYPE_NAMES = { - # https://cloud.google.com/compute/docs/reference/rest/v1/regionBackendServices - True: 'gcp-types/compute-v1:regionBackendServices', - # https://cloud.google.com/compute/docs/reference/rest/v1/backendServices - False: 'gcp-types/compute-v1:backendServices' -} - - -def set_optional_property(destination, source, prop_name): - """ Copies the property value if present. """ - - if prop_name in source: - destination[prop_name] = source[prop_name] - - -def get_backend_service_outputs(res_name, backend_name, region): - """ Creates outputs for the backend service. 
""" - - outputs = [ - { - 'name': 'name', - 'value': backend_name - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(res_name) - } - ] - - if region: - outputs.append({'name': 'region', 'value': region}) - - return outputs - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - res_name = context.env['name'] - name = properties.get('name', res_name) - project_id = properties.get('project', context.env['project']) - is_regional = 'region' in properties - region = properties.get('region') - backend_properties = { - 'name': name, - 'project': project_id, - } - - resource = { - 'name': res_name, - 'type': REGIONAL_GLOBAL_TYPE_NAMES[is_regional], - 'properties': backend_properties, - } - - optional_properties = [ - 'description', - 'backends', - 'iap', - 'timeoutSec', - 'protocol', - 'region', - 'portName', - 'enableCDN', - 'sessionAffinity', - 'affinityCookieTtlSec', - 'loadBalancingScheme', - 'connectionDraining', - 'healthChecks', - 'cdnPolicy' - ] - - for prop in optional_properties: - set_optional_property(backend_properties, properties, prop) - - if 'healthCheck' in properties: - backend_properties['healthChecks'] = [properties['healthCheck']] - - outputs = get_backend_service_outputs(res_name, name, region) - - return {'resources': [resource], 'outputs': outputs} diff --git a/dm/templates/backend_service/backend_service.py.schema b/dm/templates/backend_service/backend_service.py.schema deleted file mode 100644 index 3a9897e6974..00000000000 --- a/dm/templates/backend_service/backend_service.py.schema +++ /dev/null @@ -1,411 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Backend Service - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates a backend service. - - For more information on this resource: - https://cloud.google.com/load-balancing/docs/backend-service. - - APIs endpoints used by this template: - - gcp-types/compute-v1:backendServices => - https://cloud.google.com/compute/docs/reference/rest/v1/backendServices - - gcp-types/compute-v1:regionBackendServices => - https://cloud.google.com/compute/docs/reference/rest/v1/regionBackendServices - -additionalProperties: false - - -allOf: - - oneOf: - - required: - - healthCheck - - required: - - healthChecks - - allOf: - - not: - required: - - healthCheck - - not: - required: - - healthChecks - - oneOf: - - allOf: - - properties: - loadBalancingScheme: - enum: ["INTERNAL"] - sessionAffinity: - enum: - - NONE - - CLIENT_IP - - CLIENT_IP_PROTO - - CLIENT_IP_PORT_PROTO - protocol: - default: TCP - enum: - - UDP - - TCP - backends: - items: - balancingMode: - enum: ["CONNECTION"] - - not: - required: - - affinityCookieTtlSec - - not: - required: - - enableCDN - - not: - required: - - portName - - allOf: - - properties: - loadBalancingScheme: - enum: ["EXTERNAL"] - sessionAffinity: - enum: - - NONE - - CLIENT_IP - - GENERATED_COOKIE - protocol: - default: HTTP - enum: - - HTTP - - HTTPS - - TCP - - SSL - - required: - - portName - -properties: - name: - type: string - description: The backend service name. Resource name would be used if omitted. 
- project: - type: string - description: | - The project ID of the project containing the service. - description: - type: string - description: An optional description of the resource. - region: - type: string - description: | - The URL of the region where the regional backend service resides. - backends: - type: array - uniqueItems: true - description: | - The list of backends (instance groups) to which the backend service - distributes traffic. - items: - type: object - additionalProperties: false - required: - - group - oneOf: - - allOf: - - properties: - balancingMode: - enum: ["RATE"] - - not: - required: - - maxUtilization - - not: - required: - - maxConnections - - not: - required: - - maxConnectionsPerInstance - - not: - required: - - maxConnectionsPerEndpoint - - allOf: - - properties: - balancingMode: - enum: ["CONNECTION"] - - not: - required: - - maxUtilization - - not: - required: - - maxRate - - properties: - balancingMode: - enum: ["UTILIZATION"] - properties: - description: - type: string - description: | - An optional description of the resource. - group: - type: string - description: | - The fully-qualified URL of the Instance Group resource. If the - backend service's load balancing scheme is INTERNAL, the instance - group must reside on the same region as the backend service. - balancingMode: - type: string - description: | - The balancing mode for the backend. For INTERNAL load - balancing, the default (and the only supported) mode is CONNECTION. - enum: - - UTILIZATION - - RATE - - CONNECTION - maxUtilization: - type: number - minimum: 0.0 - maximum: 1.0 - description: | - The ratio that defines the CPU utilization target for the group. - The default value is 0.8. Used when balancingMode is UTILIZATION. - maxRate: - type: number - description: | - The maximum number of requests per second (RPS) for the group. Can be - used with either RATE or UTILIZATION balancing mode. Mandatory with - the RATE mode. 
For the RATE mode, either maxRate or maxRatePerInstance - must be set. Cannot be used for INTERNAL load balancing. - maxRatePerInstance: - type: number - description: | - The maximum number of requests per second (RPS) that a single backend - instance can handle. This is used to calculate the capacity of the group. - Can be used with any balancing mode. For the RATE mode, either maxRate or - maxRatePerInstance must be set. Cannot be used for INTERNAL load - balancing. - maxRatePerEndpoint: - type: number - description: | - The max requests per second (RPS) that a single backend network endpoint can handle. - This is used to calculate the capacity of the group. Can be used in either balancing mode. - For RATE mode, either maxRate or maxRatePerEndpoint must be set. - - This cannot be used for internal load balancing. - maxConnections: - type: number - description: | - The maximum number of simultaneous connections for the group. Can be - used with either CONNECTION or UTILIZATION balancing mode. For - the CONNECTION mode, either maxConnections or maxConnectionsPerInstance - must be set. Cannot be used for INTERNAL load balancing. - maxConnectionsPerInstance: - type: number - description: | - The maximum number of simultaneous connections that a single backend - instance can handle. This is used to calculate the capacity of the - group. Can be used in either CONNECTION or UTILIZATION balancing - modes. For the CONNECTION mode, either maxConnections or - maxConnectionsPerInstance must be set. Cannot be used for INTERNAL load balancing. - maxConnectionsPerEndpoint: - type: number - description: | - The max number of simultaneous connections that a single backend network endpoint can handle. - This is used to calculate the capacity of the group. Can be used in either - CONNECTION or UTILIZATION balancing modes. - For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint must be set. - - This cannot be used for internal load balancing. 
- capacityScaler: - type: number - minimum: 0 - maximum: 1 - description: | - The multiplier applied to the group's maximum servicing capacity - (based on UTILIZATION, RATE, or CONNECTION). The default value is 1.0. - Cannot be used for INTERNAL load balancing. - healthCheck: - type: string - description: | - The URL of the HealthCheck, HttpHealthCheck, or HttpsHealthCheck resource - for healthchecking the backend service. - healthChecks: - type: array - uniqueItems: true - maxItems: 1 - description: | - The URL of the HealthCheck, HttpHealthCheck, or HttpsHealthCheck resource - for healthchecking the backend service. - items: - type: string - timeoutSec: - type: number - default: 30 - description: | - The number of seconds to wait for the backend response before considering - the request as failed. - protocol: - type: string - description: | - The protocol the backend service uses to communicate with backends. - The default is HTTP. For INTERNAL load balancing, the possible values are - TCP and UDP, and the default is TCP. - enum: - - HTTP - - HTTPS - - TCP - - UDP - - SSL - portName: - type: string - description: | - The backend port name. The same name must appear in the instance groups - referenced by this service. Required when the load balancing scheme is - EXTERNAL. If the load balancing scheme is INTERNAL, this field is not used. - enableCDN: - type: boolean - description: | - Defines whether Cloud CDN is enabled for the backend service. - When the load balancing scheme is INTERNAL, this field is not used. - sessionAffinity: - type: string - default: NONE - description: | - The type of the session affinity to use. For the EXTERNAL load balancing - scheme, the value can be NONE, CLIENT_IP, or GENERATED_COOKIE. - For the INTERNAL load balancing scheme, the value can be NONE, CLIENT_IP, - CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO. This field is not used with the - UDP protocol. 
- enum: - - NONE - - GENERATED_COOKIE - - CLIENT_IP - - CLIENT_IP_PROTO - - CLIENT_IP_PORT_PROTO - affinityCookieTtlSec: - type: integer - minimum: 0 - maximum: 86400 - description: | - The lifetime of cookies, in seconds, if sessionAffinity is GENERATED_COOKIE. - If set to 0, the cookies are non-persistent and last only until the end of - the browser session (or equivalent). This field is not used if the load - balancing scheme is INTERNAL. - loadBalancingScheme: - type: string - description: | - Defines whether the backend service is used with INTERNAL or - EXTERNAL load balancing schema. Backend service created for one type of load - balancing cannot be used with the other. - default: EXTERNAL - enum: - - INTERNAL - - EXTERNAL - connectionDraining: - type: object - additionalProperties: false - description: the connection draining settings. - properties: - drainingTimeoutSec: - type: integer - description: | - The time period during which the instance is drained (not accepting - new connections but still procedding the ones accepted earlier). - customRequestHeaders: - type: array - uniqueItems: true - description: | - Headers that the HTTP/S load balancer should add to proxied requests. - items: - type: string - iap: - type: object - additionalProperties: false - properties: - enabled: - type: boolean - oauth2ClientId: - type: string - oauth2ClientSecret: - type: string - cdnPolicy: - type: object - additionalProperties: false - description: The cloud CDN configuration for the backend service. - properties: - cacheKeyPolicy: - type: object - additionalProperties: false - description: The CacheKeyPolicy for the CdnPolicy. - properties: - includeProtocol: - type: boolean - description: | - Defines whether the HTTP and HTTPS requests are cached separately. - includeHost: - type: boolean - description: | - If True, requests to different hosts are cached separately. 
- includeQueryString: - type: boolean - description: | - If True, includes query string parameters in the cache key - according to queryStringWhitelist and queryStringBlacklist. If - neither of the two is set, the entire query string is included. If - False, the query string is excluded from the cache key entirely. - queryStringWhitelist: - type: array - uniqueItems: true - description: | - The names of the query string parameters to include in cache keys. - All other parameters are excluded. Specify either - queryStringWhitelist or queryStringBlacklist, not both. - '&' and '=' will be percent-encoded and not treated as delimiters. - items: - type: string - queryStringBlacklist: - type: array - uniqueItems: true - description: | - The names of query string parameters to exclude from cache keys. - All other parameters are included. Specify either - queryStringWhitelist or queryStringBlacklist, not both. - '&' and '=' are percent-encoded and not treated as delimiters. - items: - type: string - signedUrlCacheMaxAgeSec: - type: string - default: 3600s - description: | - The maximum number of seconds the response to a signed URL request is - considered fresh. After this time period, the response is - revalidated before being served. When serving responses to the signed URL - requests, Cloud CDN internally behaves as if all responses - from the backend have the "Cache-Control: public, max-age=[TTL]" header, - regardless of any existing Cache-Control header. The actual headers - served in responses are not altered. - -outputs: - name: - type: string - description: The backend name. - region: - type: string - description: | - The URL of the region where the regional backend service resides. - selfLink: - type: string - description: The URI (SelfLink) of the backend service resource. 
- -documentation: - - templates/backend_service/README.md - -examples: - - templates/backend_service/examples/backend_service_global.yaml - - templates/backend_service/examples/backend_service_regional.yaml diff --git a/dm/templates/backend_service/examples/backend_service_global.yaml b/dm/templates/backend_service/examples/backend_service_global.yaml deleted file mode 100644 index e39bb65c1d2..00000000000 --- a/dm/templates/backend_service/examples/backend_service_global.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Example of the backend service template usage. -# -# In this example, a global HTTP backend service is created. -# -# Replace the following placeholders with valid values: -# : a URL of a zonal instance group -# : a URL of the TCP healthcheck -# - -imports: - - path: templates/backend_service/backend_service.py - name: backend_service.py - -resources: - - name: global-http-backend-service - type: backend_service.py - properties: - protocol: HTTP - loadBalancingScheme: EXTERNAL - sessionAffinity: CLIENT_IP - backends: - - group: - balancingMode: UTILIZATION - maxUtilization: 0.8 - healthChecks: - - - portName: http diff --git a/dm/templates/backend_service/examples/backend_service_regional.yaml b/dm/templates/backend_service/examples/backend_service_regional.yaml deleted file mode 100644 index 3dd10cee253..00000000000 --- a/dm/templates/backend_service/examples/backend_service_regional.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Example of the backend service template usage. -# -# In this example, an internal regional TCP backend service is created. 
-# -# Replace the following placeholders with valid values: -# : a region where the service resides -# : a URL of an instance group residing in -# -# : a URL of the TCP healthcheck - -imports: - - path: templates/backend_service/backend_service.py - name: backend_service.py - -resources: - - name: regional-backend-service - type: backend_service.py - properties: - region: - protocol: TCP - loadBalancingScheme: INTERNAL - backends: - - group: - balancingMode: CONNECTION - healthChecks: - - diff --git a/dm/templates/backend_service/tests/integration/backend_service.bats b/dm/templates/backend_service/tests/integration/backend_service.bats deleted file mode 100755 index a190db54cd8..00000000000 --- a/dm/templates/backend_service/tests/integration/backend_service.bats +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # test specific variables - export REGION="us-east1" - export RES_DESCRIPTION="This is the description" - export BC_DESCRIPTION="Backend description" - export TIMEOUT="35" - export ENABLE_CDN="true" - export SESSION="CLIENT_IP" - export REGIONAL_BALANCING_MODE="CONNECTION" - export GLOBAL_BALANCING_MODE="RATE" - export REGIONAL_BALANCING_SCHEME="INTERNAL" - export REGIONAL_BALANCING_PROTOCOL="TCP" - export GLOBAL_BALANCING_SCHEME="EXTERNAL" - export GLOBAL_BALANCING_PROTOCOL="HTTP" - export MAX_RATE="10000" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/backend_service/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying global external backend service" { - run gcloud compute backend-services describe \ - "global-external-backend-service-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --global - [[ "$status" -eq 0 ]] - [[ "$output" =~ "balancingMode: ${GLOBAL_BALANCING_MODE}" ]] - [[ "$output" =~ "maxRate: ${MAX_RATE}" ]] - [[ "$output" =~ "loadBalancingScheme: ${GLOBAL_BALANCING_SCHEME}" ]] - [[ "$output" =~ "protocol: ${GLOBAL_BALANCING_PROTOCOL}" ]] - [[ "$output" =~ "enableCDN: ${ENABLE_CDN}" ]] -} - -@test "Verifying regional internal backend service" { - run gcloud compute backend-services describe \ - "regional-internal-backend-service-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region "${REGION}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "balancingMode: ${REGIONAL_BALANCING_MODE}" ]] - [[ "$output" =~ "loadBalancingScheme: ${REGIONAL_BALANCING_SCHEME}" ]] - [[ "$output" =~ "protocol: ${REGIONAL_BALANCING_PROTOCOL}" ]] - [[ "$output" =~ " description: ${BC_DESCRIPTION}" ]] - [[ "$output" =~ "description: ${RES_DESCRIPTION}" ]] - [[ "$output" =~ "sessionAffinity: ${SESSION}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/backend_service/tests/integration/backend_service.yaml b/dm/templates/backend_service/tests/integration/backend_service.yaml deleted file mode 100644 index d7bf314045d..00000000000 --- a/dm/templates/backend_service/tests/integration/backend_service.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# Test of the backend service template. 
- -imports: - - path: templates/backend_service/backend_service.py - name: backend_service.py - -resources: - - name: regional-internal-backend-service-${RAND} - type: backend_service.py - properties: - name: regional-internal-backend-service-${RAND} - region: ${REGION} - protocol: ${REGIONAL_BALANCING_PROTOCOL} - description: ${RES_DESCRIPTION} - loadBalancingScheme: ${REGIONAL_BALANCING_SCHEME} - timeoutSec: ${TIMEOUT} - sessionAffinity: ${SESSION} - backends: - - group: $(ref.regional-igm-${RAND}.instanceGroup) - description: ${BC_DESCRIPTION} - balancingMode: ${REGIONAL_BALANCING_MODE} - healthCheck: $(ref.test-healthcheck-tcp-${RAND}.selfLink) - - - name: global-external-backend-service-${RAND} - type: backend_service.py - properties: - protocol: ${GLOBAL_BALANCING_PROTOCOL} - healthCheck: $(ref.test-healthcheck-http-${RAND}.selfLink) - loadBalancingScheme: ${GLOBAL_BALANCING_SCHEME} - enableCDN: ${ENABLE_CDN} - backends: - - group: $(ref.zonal-igm-${RAND}.instanceGroup) - balancingMode: ${GLOBAL_BALANCING_MODE} - maxRate: ${MAX_RATE} - -# Prerequisites: instance groups, instance template, healthchecks, -# and a network - - name: regional-igm-${RAND} - type: compute.v1.regionInstanceGroupManager - properties: - instanceTemplate: $(ref.instance-template-${RAND}.selfLink) - region: ${REGION} - targetSize: 1 - - - name: zonal-igm-${RAND} - type: compute.v1.instanceGroupManager - properties: - instanceTemplate: $(ref.instance-template-${RAND}.selfLink) - zone: us-central1-c - targetSize: 1 - - - name: instance-template-${RAND} - type: compute.v1.instanceTemplate - properties: - properties: - machineType: f1-micro - disks: - - autoDelete: true - boot: true - deviceName: boot - initializeParams: - sourceImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - type: PERSISTENT - networkInterfaces: - - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - network: $(ref.test-network-${RAND}.selfLink) - - - name: test-network-${RAND} - type: 
compute.v1.network - properties: - autoCreateSubnetworks: true - - - name: test-healthcheck-tcp-${RAND} - type: compute.v1.healthCheck - properties: - type: TCP - tcpHealthCheck: - port: 80 - proxyHeader: NONE - - - name: test-healthcheck-http-${RAND} - type: compute.v1.httpHealthCheck diff --git a/dm/templates/bastion/README.md b/dm/templates/bastion/README.md deleted file mode 100644 index b72cbb86bda..00000000000 --- a/dm/templates/bastion/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# Bastion Host - -> :warning: **NOTE** - -Check out SSH via IAP as an alternative to Bastion Hosts: - -- [Cloud IAP enables context-aware access to VMs via SSH and RDP without bastion hosts](https://cloud.google.com/blog/products/identity-security/cloud-iap-enables-context-aware-access-to-vms-via-ssh-and-rdp-without-bastion-hosts) -- [Using IAP for TCP forwarding](https://cloud.google.com/iap/docs/using-tcp-forwarding#tunneling_with_ssh) -> :warning: **NOTE** - -This template creates a Bastion host. Once it had been deployed, one can use -`gcloud compute ssh --zone ` to connect to -the Bastion host, and then use -`gcloud compute ssh --zone --internal-ip` to SSH to -another host, within the same network, that has no external IP assigned. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.computeAdmin](https://cloud.google.com/compute/docs/access/iam) - IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [compute.v1.instance](https://cloud.google.com/compute/docs/reference/rest/v1/instances) -- [compute.v1.firewall](https://cloud.google.com/compute/docs/reference/rest/v1/firewalls) - -### Properties - -See the `properties` section in the schema file(s): - -- [Bastion Host](bastion.py.schema) - -### Usage - -1. 
Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this - case, [examples/bastion.yaml](examples/bastion.yaml): - -```shell - cp templates/bastion/examples/bastion.yaml \ - my_bastion.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for - properties, refer to the schema files listed above): - -```shell - vim my_bastion.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace \ with the relevant - deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_bastion.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Bastion](examples/bastion.yaml) diff --git a/dm/templates/bastion/bastion.py b/dm/templates/bastion/bastion.py deleted file mode 100644 index 9d8f6372600..00000000000 --- a/dm/templates/bastion/bastion.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Bastion host. 
""" - -import copy - -IMAGE = 'projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts' -DISABLE_SUDO_SCRIPT = """sudo cat /etc/sudoers | \\ - sed 's/%.*$//' | \\ - sed 's/#includedir.*$//' | \\ - sudo EDITOR=tee visudo""" -SSH = {'IPProtocol': 'tcp', 'ports': [22]} - - -def set_optional_property(destination, source, prop_name): - """ Copies the property value if present. """ - - if prop_name in source: - destination[prop_name] = source[prop_name] - - -def find_metadata_item(metadata_items, key_name): - """ Finds a metadata entry by the key name. """ - - for item in metadata_items: - if item['key'] == key_name: - return item - - return None - - -def disable_sudo(bastion_props): - """ Adds startup-script metadata that disables sudo. """ - - metadata = bastion_props.get('metadata', {'items': []}) - startup_item = find_metadata_item(metadata['items'], 'startup-script') - if not startup_item: - startup_item = {'key': 'startup-script', 'value': ''} - metadata['items'].append(startup_item) - new_script = DISABLE_SUDO_SCRIPT + '\n' + startup_item['value'] - startup_item['value'] = new_script - bastion_props['metadata'] = metadata - - -def get_ssh_firewall_rule( - name, - optional_properties, - output_name, - output_self_link -): - """ Creates a new firewall rule with outputs. """ - - ssh_props = {'allowed': [copy.deepcopy(SSH)]} - - ssh_rule = { - 'name': name, - # https://cloud.google.com/compute/docs/reference/rest/v1/firewalls - 'type': 'gcp-types/compute-v1:firewalls', - 'properties': ssh_props - } - - for key, value in optional_properties.items(): - if value: - ssh_props[key] = value - - return [ssh_rule], [ - { - 'name': output_name, - 'value': name - }, - { - 'name': output_self_link, - 'value': '$(ref.{}.selfLink)'.format(name) - }, - ] - - -def create_bastion_in_ssh_rule(bastion, network, firewall_settings): - """ Creates a firewall rule for inbound SSH traffic. 
""" - - to_bastion_rule = firewall_settings.get('sshToBastion') - - if to_bastion_rule: - bastion_host_tag = to_bastion_rule['tag'] - - # Append the Bastion tag, if it is not there yet. - existing_tags = bastion['properties'].get('tags', {}).get('items', []) - if not bastion_host_tag in existing_tags: - existing_tags.append(bastion_host_tag) - bastion['properties']['tags'] = {'items': existing_tags} - - rule_setup = { - 'name': to_bastion_rule['name'], - 'sourceTags': to_bastion_rule.get('sourceTags'), - 'targetTags': [bastion_host_tag], - 'sourceRanges': to_bastion_rule.get('sourceRanges'), - 'priority': to_bastion_rule.get('priority'), - 'network': network - } - - return get_ssh_firewall_rule( - '{}-to'.format(bastion['name']), - rule_setup, - 'sshToBastionRuleName', - 'sshToBastionRuleSelfLink' - ) - - return [], [] - - -def create_bastion_out_ssh_rule(bastion, network, firewall_settings): - """ Creates a firewall rule for the Bastion outbound SSH traffic. """ - - from_bastion_rule = firewall_settings.get('sshFromBastion') - if from_bastion_rule: - bastion_target_tag = from_bastion_rule.get('tag') - - # Calculate the firewall rule's source tags. - if 'sshToBastion' in firewall_settings: - bastion_host_tags = [firewall_settings['sshToBastion']['tag']] - else: - # Fall back to the instance tags collection. - bastion_host_tags = bastion['properties'].get('tags', {}) - bastion_host_tags = bastion_host_tags.get('items', []) - if bastion_host_tags: - bastion_host_tags = copy.deepcopy(bastion_host_tags) - else: - msg = 'To enable SSH traffic from the Bastion host, at least one network tag must be assigned to it.' 
# pylint: disable=line-too-long - raise ValueError(msg) - - rule_setup = { - 'name': from_bastion_rule['name'], - 'sourceTags': bastion_host_tags, - 'targetTags': [bastion_target_tag], - 'priority': from_bastion_rule.get('priority'), - 'network': network, - } - - return get_ssh_firewall_rule( - '{}-from'.format(bastion['name']), - rule_setup, - 'sshFromBastionRuleName', - 'sshFromBastionRuleSelfLink' - ) - - return [], [] - - -def create_firewall_rules(bastion, network, firewall_settings): - """ Creates in/out SSH rules for the Bastion host. """ - - ssh_in_resources, ssh_in_outputs = create_bastion_in_ssh_rule( - bastion, - network, - firewall_settings - ) - - ssh_out_resources, ssh_out_outputs = create_bastion_out_ssh_rule( - bastion, - network, - firewall_settings - ) - - return ( - ssh_in_resources + ssh_out_resources, - ssh_in_outputs + ssh_out_outputs - ) - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - project_id = properties.get('project', context.env['project']) - name = properties.get('name', context.env['name']) - - network = properties['network'] - if not '$(ref' in network: - if not 'global/' in network: - network = 'global/networks/{}'.format(network) - if not 'project/' in network: - network = 'projects/{}/{}'.format(project_id, network) - - bastion_props = { - 'project': project_id, - 'name': name, - 'zone': properties['zone'], - 'networks': [{ - 'network': network, - 'accessConfigs': [{'type': 'ONE_TO_ONE_NAT'}] - }], - 'machineType': properties['machineType'], - 'diskImage': IMAGE, - } - - bastion = {'name': context.env['name'], 'type': 'instance.py', 'properties': bastion_props} - - optional_props = ['diskSizeGb', 'metadata', 'tags'] - - for prop in optional_props: - set_optional_property(bastion_props, properties, prop) - - if properties.get('disableSudo'): - disable_sudo(bastion_props) - - firewall_settings = properties.get('createFirewallRules') - if firewall_settings: 
- extra_resources, extra_outputs = create_firewall_rules( - bastion, - network, - firewall_settings - ) - else: - extra_resources = [] - extra_outputs = [] - - resources = [bastion] + extra_resources - for resource in resources: - resource['properties']['project'] = project_id - - outputs = [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - }, - { - 'name': 'internalIp', - 'value': '$(ref.{}.internalIp)'.format(context.env['name']) - }, - { - 'name': 'externalIp', - 'value': '$(ref.{}.externalIp)'.format(context.env['name']) - } - ] - - return { - 'resources': resources, - 'outputs': outputs + extra_outputs - } diff --git a/dm/templates/bastion/bastion.py.schema b/dm/templates/bastion/bastion.py.schema deleted file mode 100644 index 54e4ee5a7fc..00000000000 --- a/dm/templates/bastion/bastion.py.schema +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Bastion Host - author: Sourced Group Inc. - version: 1.0.0 - description: | - Supports creation of a Bastion host - a jump host for SSHing into those - instances that have no external IP address. 
- - For more information on this resource: - https://cloud.google.com/solutions/connecting-securely - - APIs endpoints used by this template: - - gcp-types/compute-v1:firewalls => - https://cloud.google.com/compute/docs/reference/rest/v1/firewalls - - gcp-types/compute-v1:instances => - https://cloud.google.com/compute/docs/reference/rest/v1/instances - -imports: - - path: ../instance/instance.py - name: instance.py - -additionalProperties: false - -required: - - zone - - network - -properties: - name: - type: string - description: | - The name of the Bastion host. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the instance. - network: - type: string - description: | - The URL of the network resource for the Bastion host. - Examples of valid URLs: - - https://www.googleapis.com/compute/v1/projects/project/global/networks/network - - projects/project/global/networks/network - - global/networks/default - zone: - type: string - description: The availability zone; e.g. 'us-central1-a'. - machineType: - type: string - default: f1-micro - description: | - The Compute Instance type; e.g., 'n1-standard-1'. - See https://cloud.google.com/compute/docs/machine-types for details. - diskSizeGb: - type: integer - minimum: 10 - disableSudo: - type: boolean - default: true - description: | - !!! WARNING !!! This feature does not stop the GCP daemon adding - Cloud Identity users to sudoers based on Compute OS Login Admin IAM. - - When True (default), disables `sudo` on the Bastion host for tighter - security. - metadata: - type: object - additionalProperties: false - description: | - The Bastion host metadata. If the 'disableSudo' property is True, this - is the only place where you can configure the Bastion host using - 'startup-script', which provides elevated permissions.
- For example: - metadata: - items: - - key: startup-script - - value: apt-get update - properties: - items: - type: array - uniqueItems: True - description: A collection of metadata key-value pairs. - items: - type: object - additionalProperties: false - properties: - key: - type: string - value: - type: [string, number, boolean] - createFirewallRules: - type: object - additionalProperties: false - description: | - If set, creates the firewall rules for the SSH traffic coming in an out - of the Bastion host. - anyOf: - - required: - - sshToBastion - - required: - - sshFromBastion - properties: - sshToBastion: - type: object - additionalProperties: false - description: | - Configures the firewall rule that controls the SSH traffic flow to - the Bastion host. If none of the other SSH firewall rules exist, this - rule will allow the SSH traffic to the Bastion host only. - required: - - tag - anyOf: - - required: - - sourceTags - - required: - - sourceRanges - properties: - tag: - type: string - description: | - The network tag to which the incoming SSH traffic will be - allowed. This tag will be attached to the Bastion host - automatically. - name: - type: string - default: allow-ssh-to-bastion - description: The name of the firewall rule. - sourceTags: - type: array - uniqueItems: True - description: | - If source tags are specified, the firewall rule applies only to - the traffic with source IPs that match the primary network - interfaces of those VM instances that have the tag, and are in - the same VPC network. - items: - type: string - sourceRanges: - type: array - uniqueItems: True - description: | - If source ranges are specified, the firewall applies only to the - traffic that has source IP address in these ranges. These ranges - must be expressed in the CIDR format. - items: - type: string - priority: - type: integer - description: The rule priority. 
- minimum: 0 - maximum: 65535 - sshFromBastion: - type: object - additionalProperties: false - description: | - Creates a firewall rule that allows the SSH traffic from the Bastion - host to any instance within the same network with a particular tag. - To detect traffic from the Bastion host, it must have at least one - network tag, assigned either via the - createFirewallRules.sshToBastion.tag property or via a tag - collection. - required: - - tag - properties: - tag: - type: string - description: | - The target hosts' tag that allows them to receive the SSH traffic - from the Bastion host only. - name: - type: string - default: allow-ssh-from-bastion - description: The name of the firewall rule. - priority: - type: integer - description: The rule priority. - minimum: 0 - maximum: 65535 - tags: - type: object - additionalProperties: false - required: - - items - description: Tags to apply to the instance. - properties: - items: - type: array - uniqueItems: True - description: An array of tags. - items: - type: string - -outputs: - name: - type: string - description: The name of the Bastion host. - selfLink: - type: string - description: | - The URI (SelfLink) of the Bastion host. - internalIp: - type: string - description: | - The internal IP address of the Bastion host. - externalIp: - type: array - description: | - The external IP address of the Bastion host. - sshToBastionRuleName: - type: array - description: | - If created, the name of the firewall rule controlling the SSH traffic - flow to the Bastion host. - sshToBastionRuleSelfLink: - type: array - description: | - If created, the URI (SelfLink) of the firewall rule controlling the - SSH traffic flow to the Bastion host. - sshFromBastionRuleName: - type: array - description: | - If created, the name of the firewall rule controlling the SSH traffic - flow from the Bastion host to other instances in the same network. 
- sshFromBastionRuleSelfLink: - type: array - description: | - If created, the URI (SelfLink) of the firewall rule controlling the - SSH traffic flow from the Bastion host to other instances in the - same network. - -documentation: - - templates/bastion/README.md - -examples: - - templates/bastion/examples/bastion.yaml diff --git a/dm/templates/bastion/examples/bastion.yaml b/dm/templates/bastion/examples/bastion.yaml deleted file mode 100644 index 3775a2afda2..00000000000 --- a/dm/templates/bastion/examples/bastion.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Example of the Bastion host template usage. -# -# This example creates a Bastion host with two SSH rules in the default -# network. The first firewall rule allows the Bastion host to receive the SSH -# traffic. The second rule allows other hosts with the 'example-bastion-target' -# tag to receive the SSH traffic from the Bastion host. - -imports: - - path: templates/bastion/bastion.py - name: bastion.py - -resources: - - name: example-bastion-1 - type: bastion.py - properties: - zone: us-central1-c - disableSudo: false - network: global/networks/default - createFirewallRules: - sshToBastion: - name: example-allow-ssh-to-bastion - tag: example-bastion-host - sourceRanges: - - 0.0.0.0/0 - sshFromBastion: - name: example-allow-ssh-from-bastion - tag: example-bastion-target - diff --git a/dm/templates/bastion/tests/integration/bastion.bats b/dm/templates/bastion/tests/integration/bastion.bats deleted file mode 100755 index 7dec17b3dc2..00000000000 --- a/dm/templates/bastion/tests/integration/bastion.bats +++ /dev/null @@ -1,267 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! 
-e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables. - export BASTION1_RES_NAME="test-bastion-w-sudo-${RAND}" - export DEFAULT_MACHINE_TYPE="f1-micro" - export BASTION1_MACHINE_TYPE="n1-standard-1" - export BASTION2_RES_NAME="test-bastion-wo-sudo-${RAND}" - export BASTION2_NAME="test-bastion-wo-sudo-name-${RAND}" - export ZONE="us-central1-c" - export BASTION1_DISABLE_SUDO="false" - export BASTION2_DISABLE_SUDO="true" - export BASTION2_DISK_SIZE="10" - export NETWORK_NAME="test-network-${RAND}" - export PROVISION_COMPLETED_MARKER="provision-completed-marker" - export BASTION2_STARTUP="echo '${PROVISION_COMPLETED_MARKER}'" - export BASTION2_EXTRA_TAG="extra" - export BASTION2_TAG="bastion-host" - export SSH_TO_BASTION_RULE_NAME="allow-ssh-to-bastion-${RAND}" - export SSH_FROM_BASTION_DEFAULT_RULE_NAME="allow-ssh-from-bastion" - export SSH_TO_BASTION_PRIORITY="1001" - export SSH_FROM_BASTION_PRIORITY="1002" - export SSH_TO_BASTION_SOURCE_RANGE="0.0.0.0/0" - export SSH_TO_BASTION_SOURCE_TAG="bastion-trustee" - export SSH_FROM_BASTION_SOURCE_TAG="bastion-target" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. 
- if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying the first Bastion host" { - run gcloud compute instances describe ${BASTION1_RES_NAME} \ - --zone ${ZONE} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "machineTypes/${BASTION1_MACHINE_TYPE}" ]] - [[ "$output" =~ "zones/$(ZONE)" ]] - [[ "$output" =~ "${NETWORK_NAME}" ]] -} - -@test "Verifying the first Bastion's sudo is ON" { - # Wait until VM provisioning finishes - i=0 - until gcloud compute instances get-serial-port-output \ - ${BASTION1_RES_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --zone ${ZONE} | grep ${PROVISION_COMPLETED_MARKER}; do - - sleep 5; - i=$(($i+1)) - - if [[ $i > 10 ]]; then break; fi - done - - run gcloud compute ssh ${BASTION1_RES_NAME} --command "sudo whoami" \ - --zone ${ZONE} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "root" ]] -} - -@test "Verifying the second Bastion host" { - run gcloud compute instances describe ${BASTION2_NAME} \ - --zone ${ZONE} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "machineTypes/${DEFAULT_MACHINE_TYPE}" ]] - [[ "$output" =~ "zones/${ZONE}" ]] - [[ "$output" =~ "sudo EDITOR=tee visudo" ]] # disable sudo startup 
script - [[ "$output" =~ "${BASTION2_STARTUP}" ]] # user startup script - [[ "$output" =~ "${NETWORK_NAME}" ]] -} - -@test "Verifying the second Bastion host's boot disk" { - run gcloud compute disks describe ${BASTION2_NAME} \ - --zone ${ZONE} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "sizeGb: '${BASTION2_DISK_SIZE}'" ]] -} - - -### Invalid test because Compute OS Login Admin IAM role adds sudoers ### -# -#@test "Verifying the second Bastion's sudo is OFF" { -# # Wait until VM provisioning finishes -# i=0 -# until gcloud compute instances get-serial-port-output ${BASTION2_NAME} \ -# --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ -# --zone ${ZONE} | grep ${PROVISION_COMPLETED_MARKER}; do -# -# sleep 5; -# i=$(($i+1)) -# -# if [[ $i > 10 ]]; then break; fi -# done -# -# run gcloud compute ssh ${BASTION2_NAME} --command "sudo -n whoami" \ -# --zone ${ZONE} \ -# --project "${CLOUD_FOUNDATION_PROJECT_ID}" -# echo "status = ${status}" -# echo "output = ${output}" -# [[ ! 
"$status" -eq 0 ]] -#} - -@test "Verifying the second Bastion's tags" { - run gcloud compute instances describe ${BASTION2_NAME} \ - --format "yaml(tags)" \ - --zone ${ZONE} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "- ${BASTION2_EXTRA_TAG}" ]] - [[ "$output" =~ "- ${BASTION2_TAG}" ]] -} - -@test "Verifying Bastion's inbound firewall rule" { - run gcloud compute firewall-rules describe "${SSH_TO_BASTION_RULE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "IPProtocol: tcp" ]] - [[ "$output" =~ "- '22'" ]] - [[ "$output" =~ "direction: INGRESS" ]] - [[ "$output" =~ "disabled: false" ]] - [[ "$output" =~ "${NETWORK_NAME}" ]] - [[ "$output" =~ "priority: ${SSH_TO_BASTION_PRIORITY}" ]] -} - -@test "Verifying Bastion's inbound firewall rule's source range" { - run gcloud compute firewall-rules describe "${SSH_TO_BASTION_RULE_NAME}" \ - --format="yaml(sourceRanges)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${SSH_TO_BASTION_SOURCE_RANGE}" ]] -} - -@test "Verifying Bastion's inbound firewall rule's source tag" { - run gcloud compute firewall-rules describe "${SSH_TO_BASTION_RULE_NAME}" \ - --format="yaml(sourceTags)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${SSH_TO_BASTION_SOURCE_TAG}" ]] -} - -@test "Verifying Bastion's inbound firewall rule's target tag" { - run gcloud compute firewall-rules describe "${SSH_TO_BASTION_RULE_NAME}" \ - --format="yaml(targetTags)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${BASTION2_TAG}" ]] -} - -@test "Verifying Bastion's 
outbound firewall rule" { - run gcloud compute firewall-rules describe \ - "${SSH_FROM_BASTION_DEFAULT_RULE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "IPProtocol: tcp" ]] - [[ "$output" =~ "- '22'" ]] - [[ "$output" =~ "direction: INGRESS" ]] - [[ "$output" =~ "disabled: false" ]] - [[ "$output" =~ "${NETWORK_NAME}" ]] - [[ "$output" =~ "priority: ${SSH_FROM_BASTION_PRIORITY}" ]] -} - -@test "Verifying Bastion's outbound firewall rule's source tag" { - run gcloud compute firewall-rules describe \ - "${SSH_FROM_BASTION_DEFAULT_RULE_NAME}" \ - --format="yaml(sourceTags)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${BASTION2_TAG}" ]] -} - -@test "Verifying Bastion's outbound firewall rule's target tag" { - run gcloud compute firewall-rules describe \ - "${SSH_FROM_BASTION_DEFAULT_RULE_NAME}" \ - --format="yaml(targetTags)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${SSH_FROM_BASTION_SOURCE_TAG}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - diff --git a/dm/templates/bastion/tests/integration/bastion.yaml b/dm/templates/bastion/tests/integration/bastion.yaml deleted file mode 100644 index e8accc4ee31..00000000000 --- a/dm/templates/bastion/tests/integration/bastion.yaml +++ /dev/null @@ -1,67 +0,0 @@ -# Test of the Bastion host template. 
- -imports: - - path: templates/bastion/bastion.py - name: bastion.py - -resources: - - name: ${BASTION1_RES_NAME} - type: bastion.py - properties: - zone: ${ZONE} - disableSudo: ${BASTION1_DISABLE_SUDO} - network: $(ref.${NETWORK_NAME}.selfLink) - machineType: ${BASTION1_MACHINE_TYPE} - metadata: - items: - - key: startup-script - value: ${BASTION2_STARTUP} - tags: - items: - - ${BASTION2_TAG} - - - name: ${BASTION2_RES_NAME} - type: bastion.py - properties: - name: ${BASTION2_NAME} - zone: ${ZONE} - disableSudo: ${BASTION2_DISABLE_SUDO} - diskSizeGb: ${BASTION2_DISK_SIZE} - network: $(ref.${NETWORK_NAME}.selfLink) - metadata: - items: - - key: startup-script - value: ${BASTION2_STARTUP} - tags: - items: - - ${BASTION2_EXTRA_TAG} - - ${BASTION2_TAG} - createFirewallRules: - sshToBastion: - name: ${SSH_TO_BASTION_RULE_NAME} - tag: ${BASTION2_TAG} - priority: ${SSH_TO_BASTION_PRIORITY} - sourceRanges: - - ${SSH_TO_BASTION_SOURCE_RANGE} - sourceTags: - - ${SSH_TO_BASTION_SOURCE_TAG} - sshFromBastion: - tag: ${SSH_FROM_BASTION_SOURCE_TAG} - priority: ${SSH_FROM_BASTION_PRIORITY} - -# Test prerequisites: - - - name: ${NETWORK_NAME} - type: compute.v1.network - properties: - autoCreateSubnetworks: true - - - name: test-network-allow-icmp - type: compute.v1.firewall - properties: - network: $(ref.${NETWORK_NAME}.selfLink) - sourceRanges: - - 0.0.0.0/0 - allowed: - - IPProtocol: icmp - diff --git a/dm/templates/bigquery/README.md b/dm/templates/bigquery/README.md deleted file mode 100644 index fa2fd1d27e7..00000000000 --- a/dm/templates/bigquery/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# BigQuery - -This template creates a BigQuery dataset and table. 
- -## Prerequisites -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [`roles/bigquery.dataEditor`, `roles/bigquery.dataOwner` or `roles/bigquery.admin`](https://cloud.google.com/bigquery/docs/access-control) IAM roles to the project service account - -## Deployment - -### Resources - -- [bigquery.v2.dataset](https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets) -- [bigquery.v2.tables](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables) - -### Properties - -See the `properties` section in the schema file(s) - -- [BigQuery Dataset](bigquery_dataset.py.schema) -- [BigQuery Tables](bigquery_table.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment, in this case [examples/bigquery.yaml](examples/bigquery.yaml) - -```shell - cp templates/bigquery/examples/bigquery.yaml my_bigquery.yaml -``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - -```shell - vim my_bigquery.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment as described below, replacing - with your with your own deployment name - -```shell - gcloud deployment-manager deployments create \ - --config my_bigquery.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Bigquery Dataset and Table](examples/bigquery.yaml) diff --git a/dm/templates/bigquery/bigquery_dataset.py b/dm/templates/bigquery/bigquery_dataset.py deleted file mode 100644 index 1fb6cb9e4d0..00000000000 --- a/dm/templates/bigquery/bigquery_dataset.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a BigQuery dataset. """ - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - # You can modify the roles you wish to whitelist. - whitelisted_roles = ['READER', 'WRITER', 'OWNER'] - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - properties = { - 'datasetReference': - { - 'datasetId': name, - 'projectId': project_id - }, - 'location': context.properties['location'], - 'projectId': project_id, - } - - optional_properties = [ - 'description', - 'defaultTableExpirationMs', - 'defaultPartitionExpirationMs' - ] - - for prop in optional_properties: - if prop in context.properties: - properties[prop] = context.properties[prop] - - if 'access' in context.properties: - # Validate access roles. 
- for access_role in context.properties['access']: - if 'role' in access_role: - role = access_role['role'] - if role not in whitelisted_roles: - raise ValueError( - 'Role supplied \"{}\" for dataset \"{}\" not ' - ' within the whitelist: {} '.format( - role, - context.properties['name'], - whitelisted_roles - ) - ) - - properties['access'] = context.properties['access'] - - if context.properties.get('setDefaultOwner', False): - # Build the default owner for the dataset. - base = '@cloudservices.gserviceaccount.com' - default_dataset_owner = context.env['project_number'] + base - - # Build the default access for the owner. - owner_access = { - 'role': 'OWNER', - 'userByEmail': default_dataset_owner - } - properties['access'].append(owner_access) - - resources = [ - { - # https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets - 'type': 'gcp-types/bigquery-v2:datasets', - 'name': context.env['name'], - 'properties': properties - } - ] - - outputs = [ - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - }, - { - 'name': 'datasetId', - 'value': name - }, - { - 'name': 'etag', - 'value': '$(ref.{}.etag)'.format(context.env['name']) - }, - { - 'name': 'creationTime', - 'value': '$(ref.{}.creationTime)'.format(context.env['name']) - }, - { - 'name': 'lastModifiedTime', - 'value': '$(ref.{}.lastModifiedTime)'.format(context.env['name']) - } - ] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/bigquery/bigquery_dataset.py.schema b/dm/templates/bigquery/bigquery_dataset.py.schema deleted file mode 100644 index 81bdfa91acd..00000000000 --- a/dm/templates/bigquery/bigquery_dataset.py.schema +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: BigQuery Dataset - author: Sourced Group Inc. - version: 1.0.1 - description: | - Creates a BigQuery dataset. - - For information on this resource: - https://cloud.google.com/bigquery/docs/. - - APIs endpoints used by this template: - - gcp-types/bigquery-v2:datasets => - https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets - -additionalProperties: false - -required: - - name - -properties: - name: - type: string - description: | - The table dataset name. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the dataset. The - Google apps domain is prefixed if applicable. - friendlyName: - type: string - description: | - A descriptive name for the dataset. - description: - type: string - description: | - A user-friendly description of the dataset. - location: - type: string - description: | - The geographic location where the dataset resides. - The default value is US. See details at - https://cloud.google.com/bigquery/docs/dataset-locations. - default: 'US' - enum: - - Asia - - EU - - US - - us-west2 - - northamerica-northeast1 - - us-east4 - - southamerica-east1 - - europe-north1 - - europe-west2 - - europe-west3 - - europe-west6 - - asia-east2 - - asia-south1 - - asia-northeast2 - - asia-east1 - - asia-northeast1 - - asia-southeast1 - - australia-southeast1 - access: - type: array - uniqueItems: true - description: | - An array of objects that define dataset access for one or more - entities. 
You can set this property when inserting or updating - the dataset to control who is allowed to access the data. If not - specified at the dataset creation time, BigQuery defines default - dataset access for the following entities: - access.specialGroup: projectReaders; access.role: READER - access.specialGroup: projectWriters; access.role: WRITER - access.specialGroup: projectOwners; access.role: OWNER - access.userByEmail: [dataset creator email]; access.role: OWNER - items: - type: object - additionalProperties: false - required: - - role - properties: - role: - type: string - description: | - An IAM role ID that should be granted to the user, group, or domain specified in this access entry. - The following legacy mappings will be applied: OWNER <=> roles/bigquery.dataOwner - WRITER <=> roles/bigquery.dataEditor READER <=> roles/bigquery.dataViewer This field will accept any of - the above formats, but will return only the legacy format. For example, if you set this field to - "roles/bigquery.dataOwner", it will be returned back as "OWNER". @mutable bigquery.datasets.update - domain: - type: string - description: | - The domain to grant access to. All users signed in with the - specified domain are granted the corresponding access. - Example: "example.com". - userByEmail: - type: string - description: | - The email address of a user to grant access to. For example: - fred@example.com. - groupByEmail: - type: string - description: The email address of a Google Group to grant access to. - specialGroup: - type: string - description: | - The special group to grant access to. Possible values include: - projectOwners: owners of the enclosing project - projectReaders: readers of the enclosing project - projectWriters: writers of the enclosing project - allAuthenticatedUsers: all authenticated BigQuery users - view: - type: object - additionalProperties: false - description: | - A view from a different dataset to grant access to. 
Queries - executed against that view have the Read access to tables in that - dataset. The Role value is not required when this field is set. If - the view is updated, access to that view must be granted again - via an Update operation. - properties: - datasetId: - type: string - description: The ID of the dataset containing the table. - projectId: - type: string - description: The ID of the project containing the table. - tableId: - type: string - pattern: ^[0-9a-zA-Z][0-9a-zA-Z_]{4,1023}$ - description: | - The table ID. The ID must contain only letters - (a-z, A-Z), numbers (0-9), or underscores (_). The maximum - length is 1,024 characters. - setDefaultOwner: - type: boolean - default: False - description: | - Defines whether the default project service account is granted the IAM owner - permissions. - defaultTableExpirationMs: - type: string - format: int64 - description: | - The default lifetime of all tables in the dataset, in milliseconds. The - minimum value is 3600000 milliseconds (one hour). Once this property is - set, all newly-created tables in the dataset get their expirationTime - property set to the creation time plus the value of this property. - Changes to the value affect only new tables, not the existing ones. When - expirationTime for a given table is reached, that table is deleted - automatically. If a table's expirationTime is modified or - removed before the table expires, or if you provide an explicit - expirationTime while creating the table, that value takes precedence over - the default expiration time indicated by this property. - minimum: 3600000 - defaultPartitionExpirationMs: - type: string - format: int64 - description: | - The default partition expiration for all partitioned tables in the dataset, in milliseconds.
- Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs - property in the timePartitioning settings set to this value, and changing the value will only affect new tables, - not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. - Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of - defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. - If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, - that value takes precedence over the default partition expiration time indicated by this property. - labels: - type: object - description: | - Map labels associated with this dataset. - Example: - name: wrench - mass: 1.3kg - count: 3 - -outputs: - selfLink: - type: string - description: The URI of the created resource. - etag: - type: string - description: The hash of the resource. - creationTime: - type: string - description: | - The time when the dataset was created, in milliseconds since - epoch. For example, 1535739430. - lastModifiedTime: - type: string - description: | - The time when the dataset or any of its tables was last - modified, in milliseconds since the epoch. For example, - 1535739430. - -documentation: - - templates/bigquery/README.md - -examples: - - templates/bigquery/examples/bigquery.yaml diff --git a/dm/templates/bigquery/bigquery_table.py b/dm/templates/bigquery/bigquery_table.py deleted file mode 100644 index bf408746827..00000000000 --- a/dm/templates/bigquery/bigquery_table.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" This template creates a BigQuery table. """ - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - properties = { - 'tableReference': - { - 'tableId': name, - 'datasetId': context.properties['datasetId'], - 'projectId': project_id - }, - 'datasetId': context.properties['datasetId'], - 'projectId': project_id, - } - - optional_properties = [ - 'description', - 'friendlyName', - 'expirationTime', - 'schema', - 'timePartitioning', - 'externalDataConfiguration', - 'view' - ] - - for prop in optional_properties: - if prop in context.properties: - if prop == 'schema': - properties[prop] = {'fields': context.properties[prop]} - else: - properties[prop] = context.properties[prop] - - resources = [ - { - # https://cloud.google.com/bigquery/docs/reference/rest/v2/tables - 'type': 'gcp-types/bigquery-v2:tables', - 'name': context.env['name'], - 'properties': properties - } - ] - - if 'dependsOn' in context.properties: - resources[0]['metadata'] = {'dependsOn': context.properties['dependsOn']} - - outputs = [ - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - }, - { - 'name': 'etag', - 'value': '$(ref.{}.etag)'.format(context.env['name']) - }, - { - 'name': 'creationTime', - 'value': '$(ref.{}.creationTime)'.format(context.env['name']) - }, - { - 'name': 'lastModifiedTime', - 'value': 
'$(ref.{}.lastModifiedTime)'.format(context.env['name']) - }, - { - 'name': 'location', - 'value': '$(ref.{}.location)'.format(context.env['name']) - }, - { - 'name': 'numBytes', - 'value': '$(ref.{}.numBytes)'.format(context.env['name']) - }, - { - 'name': 'numLongTermBytes', - 'value': '$(ref.{}.numLongTermBytes)'.format(context.env['name']) - }, - { - 'name': 'numRows', - 'value': '$(ref.{}.numRows)'.format(context.env['name']) - }, - { - 'name': 'type', - 'value': '$(ref.{}.type)'.format(context.env['name']) - } - ] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/bigquery/bigquery_table.py.schema b/dm/templates/bigquery/bigquery_table.py.schema deleted file mode 100644 index 610436ed67a..00000000000 --- a/dm/templates/bigquery/bigquery_table.py.schema +++ /dev/null @@ -1,568 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: BigQuery Table - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates a BigQuery table. - - For information on this resource: - https://cloud.google.com/bigquery/docs/. - - APIs endpoints used by this template: - - gcp-types/bigquery-v2:tables => - https://cloud.google.com/bigquery/docs/reference/rest/v2/tables - -additionalProperties: false - -required: - - name - -properties: - name: - type: string - description: | - The table name name. Resource name would be used if omitted. 
- project: - type: string - description: | - The project ID of the project containing the table. The - Google apps domain is prefixed if applicable. - datasetId: - type: string - description: | - The ID of the dataset the table belongs to. - friendlyName: - type: string - description: | - A descriptive name for the table. - description: - type: string - description: | - A user-friendly description of the dataset. - expirationTime: - type: string - description: | - The time when the table expires, in milliseconds since the epoch. If - not specified, the table persists indefinitely. Expired tables are - deleted, and their storage is reclaimed. The defaultTableExpirationMs - property of the encapsulating dataset can be used to set a default - expirationTime on newly created tables. For example, 1535739430. - encryptionConfiguration: - type: object - additionalProperties: false - description: | - Custom encryption configuration (e.g., Cloud KMS keys). - properties: - kmsKeyName: - type: string - description: | - Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. - The BigQuery Service Account associated with your project requires access to this encryption key. - externalDataConfiguration: - type: object - additionalProperties: false - description: | - Describes the data format, location, and other properties of a table stored outside of BigQuery. - By defining these properties, the data source can then be queried as if it were a standard BigQuery table. - required: - - sourceUris - - sourceFormat - properties: - sourceUris: - type: array - minItems: 1 - uniqueItems: true - description: | - The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: - Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. - Size limits related to load jobs apply to external data sources. 
For Google Cloud Bigtable URIs: - Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a - Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified. - Also, the '*' wildcard character is not allowed. - items: - type: string - schema: - type: object - description: | - The schema for the data. Schema is required for CSV and JSON formats. Schema is disallowed for - Google Cloud Bigtable, Cloud Datastore backups, and Avro formats. - sourceFormat: - type: string - description: | - The data format. For CSV files, specify "CSV". For Google sheets, specify "GOOGLE_SHEETS". - For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro files, specify "AVRO". - For Google Cloud Datastore backups, specify "DATASTORE_BACKUP". - [Beta] For Google Cloud Bigtable, specify "BIGTABLE". - enum: - - CSV - - GOOGLE_SHEETS - - NEWLINE_DELIMITED_JSON - - AVRO - - DATASTORE_BACKUP - - BIGTABLE - - PARQUET - maxBadRecords: - type: number - description: | - The maximum number of bad records that BigQuery can ignore when reading data. If the number of - bad records exceeds this value, an invalid error is returned in the job result. - The default value is 0, which requires that all records are valid. This setting is ignored - for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. - autodetect: - type: boolean - description: | - Indicates if BigQuery should allow extra values that are not represented in the table schema. - If true, the extra values are ignored. If false, records with extra columns are treated as bad records, - and if there are too many bad records, an invalid error is returned in the job result. - The default value is false. 
The sourceFormat property determines what BigQuery treats as an extra value: - CSV: Trailing columns JSON: Named values that don't match any column names - Google Cloud Bigtable: This setting is ignored - Google Cloud Datastore backups: This setting is ignored - Avro: This setting is ignored. - compression: - type: string - description: | - The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE. - This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. - An empty string is an invalid value. - enum: - - NONE - - GZIP - csvOptions: - type: object - additionalProperties: false - description: | - Additional properties to set if sourceFormat is set to CSV. - properties: - fieldDelimiter: - type: string - description: | - The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, - and then uses the first byte of the encoded string to split the data in its raw, binary state. - BigQuery also supports the escape sequence "\t" to specify a tab separator. - The default value is a comma (','). - skipLeadingRows: - type: number - description: | - The number of rows at the top of a CSV file that BigQuery will skip when reading the data. - The default value is 0. This property is useful if you have header rows in the file that should be skipped. - quote: - type: string - description: | - The value that is used to quote data sections in a CSV file. BigQuery converts the string to - ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, - binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, - set the property value to an empty string. If your data contains quoted newline characters, - you must also set the allowQuotedNewlines property to true. 
@default '"' - allowQuotedNewlines: - type: boolean - description: | - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. - The default value is false. - allowJaggedRows: - type: boolean - description: | - Indicates if BigQuery should accept rows that are missing trailing optional columns. - If true, BigQuery treats missing trailing columns as null values. - If false, records with missing trailing columns are treated as bad records, and if there are - too many bad records, an invalid error is returned in the job result. The default value is false. - encoding: - type: string - description: | - The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. - The default value is UTF-8. BigQuery decodes the data after the raw, binary data has - been split using the values of the quote and fieldDelimiter properties. - enum: - - UTF-8 - - ISO-8859-1 - bigtableOptions: - type: object - additionalProperties: false - description: | - Additional options if sourceFormat is set to BIGTABLE. - properties: - columnFamilies: - type: array - uniqueItems: true - description: | - tabledata.list of column families to expose in the table schema along with their types. - This list restricts the column families that can be referenced in queries and specifies their value types. - You can use this list to do type conversions - see the 'type' field for more details. - If you leave this list empty, all column families are present in the table schema and their values - are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. - items: - type: object - additionalProperties: false - properties: - familyId: - type: string - description: | - Identifier of the column family. - type: - type: string - description: | - The type to convert the value in cells of this column family. 
The values are expected to be - encoded using HBase Bytes.toBytes function when using the BINARY encoding value. - Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN - Default type is BYTES. This can be overridden for a specific column by listing that - column in 'columns' and specifying a type for it. - enum: - - BYTES - - STRING - - INTEGER - - FLOAT - - BOOLEAN - encoding: - type: string - description: | - The encoding of the values when the type is not STRING. Acceptable encoding values are: - - TEXT - indicates values are alphanumeric text strings. - - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. - This can be overridden for a specific column by listing that column in - 'columns' and specifying an encoding for it. - enum: - - TEXT - - BINARY - columns: - type: array - uniqueItems: true - description: | - Lists of columns that should be exposed as individual fields as opposed to a list of - (column name, value) pairs. All columns whose qualifier matches a qualifier in this list - can be accessed as .. Other columns can be accessed as a list through .Column field. - items: - type: object - additionalProperties: false - required: - - qualifierEncoded - properties: - qualifierEncoded: - type: string - description: | - Qualifier of the column. Columns in the parent column family that has this exact qualifier - are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in - the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. - The column field name is the same as the column qualifier. However, if the qualifier is not a - valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier - must be provided as fieldName. - qualifierString: - type: string - fieldName: - type: string - description: | - If the qualifier is not a valid BigQuery field identifier i.e. 
does not match - [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name - and is used as field name in queries. - type: - type: string - description: | - The type to convert the value in cells of this column. The values are expected to be - encoded using HBase Bytes.toBytes function when using the BINARY encoding value. - Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN - Default type is BYTES. 'type' can also be set at the column family level. - However, the setting at this level takes precedence if 'type' is set at both levels. - enum: - - BYTES - - STRING - - INTEGER - - FLOAT - - BOOLEAN - encoding: - type: string - description: | - The encoding of the values when the type is not STRING. Acceptable encoding values are: - - TEXT - indicates values are alphanumeric text strings. - - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. - 'encoding' can also be set at the column family level. However, the setting at this level - takes precedence if 'encoding' is set at both levels. - enum: - - TEXT - - BINARY - onlyReadLatest: - type: boolean - description: | - If this is set, only the latest version of value in this column are exposed. - 'onlyReadLatest' can also be set at the column family level. However, the setting at - this level takes precedence if 'onlyReadLatest' is set at both levels. - ignoreUnspecifiedColumnFamilies: - type: boolean - description: | - If field is true, then the column families that are not specified in columnFamilies list are not - exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false. - readRowkeyAsString: - type: boolean - description: | - If field is true, then the rowkey column families will be read and converted to string. - Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. - The default value is false. 
- googleSheetsOptions: - type: object - additionalProperties: false - description: | - Additional options if sourceFormat is set to GOOGLE_SHEETS. - properties: - skipLeadingRows: - type: number - description: | - The number of rows at the top of a sheet that BigQuery will skip when reading the data. - The default value is 0. This property is useful if you have header rows that should be skipped. - When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to - detect headers in the first row. If they are not detected, the row is read as data. Otherwise data - is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are - no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect - skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. - Otherwise row N is used to extract column names for the detected schema. - range: - type: string - description: | - [Beta] Range of a sheet to query from. Only used when non-empty. - hivePartitioningOptions: - type: object - properties: - mode: - type: string - description: | - [Experimental] When set, what mode of hive partitioning to use when reading data. - Two modes are supported: - - AUTO: automatically infer partition key name(s) and type(s). - - STRINGS: automatically infer partition key name(s). All types are strings. - Not all storage formats support hive partitioning -- requesting hive partitioning - on an unsupported format will lead to an error. - enum: - - AUTO - - STRINGS - sourceUriPrefix: - type: string - description: | - When hive partition detection is requested, a common prefix for all source uris must be required. - The prefix must end immediately before the partition key encoding begins. 
- For example, consider files following this data layout: - - gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro - - gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro - When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either - of gs://bucket/path_to_table or gs://bucket/path_to_table/. - clustering: - type: object - additionalProperties: false - description: | - Clustering specification for the table. Must be specified with time-based partitioning, data in the table - will be first partitioned and subsequently clustered. - required: - - fields - properties: - fields: - type: array - minItems: 1 - uniqueItems: true - description: | - One or more fields on which data should be clustered. Only top-level, non-repeated, simple-type fields - are supported. The order of the fields will determine how clusters will be generated, so it is important. - items: - type: string - requirePartitionFilter: - type: boolean - description: | - [Beta] If set to true, queries over this table require a partition filter that can be used for - partition elimination to be specified. - timePartitioning: - type: object - additionalProperties: false - description: | - The time-based partitioning specification for this table. - properties: - expirationMs: - type: string - format: int64 - description: | - The number of milliseconds for which the storage for partitions - is kept in the table. The storage in a partition has an expiration - time of its partition time plus the expirationMs value. - field: - type: string - description: | - The field to be used for table partitioning. This field must be a top-level - TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. - If not specified, the table is partitioned by a pseudo-column, - referenced via either '_PARTITIONTIME' as TIMESTAMP type or - '_PARTITIONDATE' as DATE type. 
- requirePartitionFilter: - type: boolean - description: | - [Beta] If True, queries over the table require a partition filter - (that can be used for partition elimination) to be specified. - type: - type: string - description: | - The only supported type is DAY, which generates one partition - per day. - view: - type: object - additionalProperties: false - description: The view definintion. - properties: - query: - type: string - description: | - The query that BigQuery executes when the view is referenced. - useLegacySql: - type: boolean - description: | - Defines whether to use BigQuery's legacy SQL for the view. The - default is True. If set to False, the view uses BigQuery's - standard SQL: https://cloud.google.com/bigquery/sql-reference/. - Queries and views that reference the view must use the same flag - value. - userDefinedFunctionResources: - type: array - uniqueItems: true - description: | - User-defined function resources used in the query. - items: - oneOf: - - inlineCode: - type: string - description: | - The inline resource that contains code for a user-defined - function (UDF). An equivalent a URI for a file containing - the same code. - - resourceUri: - type: string - description: | - The code resource to load from a Google Cloud Storage URI - (gs://bucket/path). - schema: - type: array - uniqueItems: true - description: | - The schema for the data. Required for the CSV and JSON formats. - Disallowed for the Google Cloud Bigtable, Cloud Datastore - backups, and Avro formats. - items: - type: object - additionalProperties: false - description: Defines the table fields. - required: - - name - - type - properties: - name: - type: string - pattern: ^[_a-zA-Z][0-9a-zA-Z_]{1,128}$ - description: | - The field name. Must contain only letters (a-z, A-Z), - numbers (0-9), or underscores (_); must start with a letter - or underscore. The maximum length is 128 characters. - type: - type: string - description: | - The field data type. 
Possible values are STRING, BYTES, - INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as - FLOAT), BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, - DATETIME, RECORD (indicates that the field contains a nested - schema), and STRUCT (same as RECORD). See details at - https://cloud.google.com/bigquery/docs/schemas and - https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types. - enum: - - STRING - - BYTES - - INTEGER - - INT64 - - FLOAT - - FLOAT64 - - BOOLEAN - - BOOL - - TIMESTAMP - - DATE - - TIME - - DATETIME - - RECORD - - STRUCT - mode: - type: string - description: - The field mode. Possible values are NULLABLE, REQUIRED, and - REPEATED. The default is NULLABLE. - enum: - - NULLABLE - - REQUIRED - - REPEATED - description: - type: string - description: | - The field description. The maximum length is 1,024 characters. - labels: - type: object - description: | - Map labels associated with this table. - Example: - name: wrench - mass: 1.3kg - count: 3 - -outputs: - selfLink: - type: string - description: The URI of the created resource. - etag: - type: string - description: The hash of the resource. - creationTime: - type: string - description: | - The time when the dataset was created, in milliseconds since - epoch. - lastModifiedTime: - type: string - description: | - The date when the dataset (or any of its tables) was last - modified, in milliseconds since the epoch. - location: - type: string - description: | - The geographic location where the table resides. This value is - inherited from the dataset. - numBytes: - type: string - description: | - The size of the table in bytes, excluding data in the streaming - buffer. - numLongTermBytes: - type: string - format: int64 - description: | - The number of bytes in the table that are considered - \"long-term storage\". - numRows: - type: string - description: | - The number of rows of data in the table, excluding data in the - streaming buffer. 
- type: - type: string - description: | - The table type. The following values are supported: - TABLE - a normal BigQuery table - VIEW - a virtual table defined by an SQL query - EXTERNAL - a table that references data stored in an external - storage system, such as Google Cloud Storage. - The default value is TABLE. - -documentation: - - templates/bigquery/README.md - -examples: - - templates/bigquery/examples/bigquery.yaml diff --git a/dm/templates/bigquery/examples/bigquery.yaml b/dm/templates/bigquery/examples/bigquery.yaml deleted file mode 100644 index e84e2445a38..00000000000 --- a/dm/templates/bigquery/examples/bigquery.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# Example of the BigQuery (dataset and table) template usage. -# -# Replace `` with your account email. - -imports: - - path: templates/bigquery/bigquery_dataset.py - name: bigquery_dataset.py - - path: templates/bigquery/bigquery_table.py - name: bigquery_table.py - -resources: - - name: test-bq-dataset - type: bigquery_dataset.py - properties: - name: test_bq_dataset - location: US - access: - - role: OWNER - userByEmail: - - - name: test-bq-table - type: bigquery_table.py - properties: - name: test_bq_table - dependsOn: - - test-bq-dataset - datasetId: $(ref.test-bq-dataset.datasetId) - schema: - - name: firstname - type: STRING - - name: lastname - type: STRING - - name: age - type: INTEGER diff --git a/dm/templates/bigquery/tests/integration/bigquery.bats b/dm/templates/bigquery/tests/integration/bigquery.bats deleted file mode 100644 index 7ad2dd243e6..00000000000 --- a/dm/templates/bigquery/tests/integration/bigquery.bats +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! 
-e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export TEST_SERVICE_ACCOUNT="test-sa-${RAND}" - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - gcloud iam service-accounts create "${TEST_SERVICE_ACCOUNT}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - create_config - fi - - # Per-test setup steps. - } - -function teardown() { - # Global teardown; executed once per test file. 
- if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud iam service-accounts delete \ - "${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that a dataset was created in deployment ${DEPLOYMENT_NAME}" { - run bq show --format=prettyjson \ - "${CLOUD_FOUNDATION_PROJECT_ID}":test_bq_dataset_${RAND} - [ "$status" -eq 0 ] - [[ "$output" =~ "\"datasetId\": \"test_bq_dataset_${RAND}\"" ]] -} - -@test "Verifying that a table was created in the dataset in deployment ${DEPLOYMENT_NAME}" { - run bq ls --format=prettyjson \ - "${CLOUD_FOUNDATION_PROJECT_ID}":test_bq_dataset_${RAND} - [ "$status" -eq 0 ] - [[ "$output" =~ "\"tableId\": \"test_bq_table_${RAND}\"" ]] -} - -@test "Verifying that a table schema was created in the dataset deployment ${DEPLOYMENT_NAME}" { - run bq show --schema test_bq_dataset_${RAND}.test_bq_table_${RAND} - [ "$status" -eq 0 ] - [[ "$output" =~ "{\"type\":\"STRING\",\"name\":\"firstname\"}" ]] - [[ "$output" =~ "{\"type\":\"STRING\",\"name\":\"lastname\"}" ]] - [[ "$output" =~ "{\"type\":\"INTEGER\",\"name\":\"age\"}" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - run bq show --format=prettyjson "${CLOUD_FOUNDATION_PROJECT_ID}":test_bq_dataset_${RAND} - [[ ! "$output" =~ "\datasetId\": \"test_bq_dataset_${RAND}\"" ]] - - run bq ls --format=prettyjson "${CLOUD_FOUNDATION_PROJECT_ID}":test_bq_dataset_${RAND} - [[ ! 
"$output" =~ "\"tableId\": \"test_bq_table_${RAND}\"" ]] -} diff --git a/dm/templates/bigquery/tests/integration/bigquery.yaml b/dm/templates/bigquery/tests/integration/bigquery.yaml deleted file mode 100644 index 557227f2c24..00000000000 --- a/dm/templates/bigquery/tests/integration/bigquery.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Test of the BigQuery template. -# -# Variables: -# RAND: a random string used by the testing suite. -# CLOUD_FOUNDATION_USER_ACCOUNT: grants the user account configured -# in the cloud_foundation_tests.conf permissions to perform BigQuery -# command-line actions to validate the datasets and tables that were -# created. For example, `bq show` and `bq ls`. - -imports: - - path: templates/bigquery/bigquery_dataset.py - name: bigquery_dataset.py - - path: templates/bigquery/bigquery_table.py - name: bigquery_table.py - -resources: - - name: test-bq-dataset-${RAND} - type: bigquery_dataset.py - properties: - name: test_bq_dataset_${RAND} - location: US - access: - - role: OWNER - userByEmail: ${CLOUD_FOUNDATION_USER_ACCOUNT} - - - name: test-bq-table-${RAND} - type: bigquery_table.py - properties: - name: test_bq_table_${RAND} - datasetId: $(ref.test-bq-dataset-${RAND}.datasetId) - dependsOn: - - test-bq-dataset-${RAND} - schema: - - name: firstname - type: STRING - - name: lastname - type: STRING - - name: age - type: INTEGER diff --git a/dm/templates/cloud_filestore/README.md b/dm/templates/cloud_filestore/README.md deleted file mode 100644 index 1502ea86d0b..00000000000 --- a/dm/templates/cloud_filestore/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Cloud Filestore - -This template creates a Cloud Filestore instance. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Enable the [Cloud Build API](https://cloud.google.com/cloud-build/docs/api/reference/rest/) -- Enable the [Cloud Filestore API](https://cloud.google.com/filestore/docs/reference/rest/) -- Make sure that your account has the Project Editor access level, or had been granted the [roles/deploymentmanager.editor](https://cloud.google.com/deployment-manager/docs/access-control#predefined_roles) IAM role -- Make sure that the [Google APIs service account](https://cloud.google.com/deployment-manager/docs/access-control#access_control_for_deployment_manager) has **default** permissions, or had been explicitly granted the [roles/file.editor](https://cloud.google.com/functions/docs/reference/iam/roles#standard-roles) IAM role - -## Deployment - -### Resources - -- [gcp-types/file-v1beta1:instances](https://cloud.google.com/filestore/docs/reference/rest/v1beta1/projects.locations.instances/create) - -### Properties - -See the `properties` section in the schema file(s): -- [Cloud Filestore](cloud_filestore.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd cloud-foundation-toolkit/dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/cloud\_filestore.yaml](examples/cloud_filestore.yaml): - -```shell - cp templates/cloud_filestore/examples/cloud_filestore.yaml my_cloud_filestore.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_cloud_filestore.yaml # <== change values to match your GCP setup -``` - -5. 
Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_cloud_filestore.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Cloud Filestore](examples/cloud_filestore.yaml) diff --git a/dm/templates/cloud_filestore/cloud_filestore.py b/dm/templates/cloud_filestore/cloud_filestore.py deleted file mode 100644 index 20e8add3f6a..00000000000 --- a/dm/templates/cloud_filestore/cloud_filestore.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Google Cloud Filestore instance. """ - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - resources = [] - properties = context.properties - project_id = properties.get('project', context.env['project']) - name = properties.get('name', context.env['name']) - - resource = { - 'name': context.env['name'], - # https://cloud.google.com/filestore/docs/reference/rest/v1beta1/projects.locations.instances/create - 'type': 'gcp-types/file-v1beta1:projects.locations.instances', - 'properties': { - 'parent': 'projects/{}/locations/{}'.format(project_id, properties['location']), - 'instanceId': name, - } - } - - optional_props = [ - 'description', - 'tier', - 'labels', - 'fileShares', - 'networks', - ] - - for prop in optional_props: - if prop in properties: - resource['properties'][prop] = properties[prop] - - resources.append(resource) - - return { - 'resources': - resources, - 'outputs': - [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'fileShares', - 'value': '$(ref.{}.fileShares)'.format(context.env['name']) - }, - { - 'name': 'networks', - 'value': '$(ref.{}.networks)'.format(context.env['name']) - } - ] - } diff --git a/dm/templates/cloud_filestore/cloud_filestore.py.schema b/dm/templates/cloud_filestore/cloud_filestore.py.schema deleted file mode 100644 index c8ae29e7168..00000000000 --- a/dm/templates/cloud_filestore/cloud_filestore.py.schema +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Google Cloud Filestore - author: Sourced Group Inc. 
- version: 1.0.0 - description: | - Supports creation of a Filestore instance. - - For more information on this resource: - https://cloud.google.com/filestore/docs/reference/rest/. - - APIs endpoints used by this template: - - gcp-types/file-v1beta1:instances => - https://cloud.google.com/filestore/docs/reference/rest/v1beta1/projects.locations.instances/create - -additionalProperties: false - -properties: - name: - type: string - description: | - The name of the instance to create. The name must be unique for the specified project and location. - project: - type: string - description: | - The project ID of the project to create Filestore instance. - location: - type: string - default: europe-west1-b - description: | - The instance's location - map to GCP zones, for example us-west1-b. - description: - type: string - description: | - A description of the instance (2048 characters or less). - tier: - type: string - default: TIER_UNSPECIFIED - description: | - The region name where the bucket is deployed. - enum: - - TIER_UNSPECIFIED - - STANDARD - - PREMIUM - labels: - type: object - description: | - Resource labels to represent user provided metadata. - fileShares: - type: array - uniqItems: true - minItems: 1 - maxItems: 1 - description: | - File share configurations for the instance. - items: - type: object - additionalProperties: false - required: - - name - - capacityGb - properties: - name: - type: string - description: | - The name of the file share (must be 16 characters or less). - capacityGb: - type: integer - minimum: 2560 - description: | - File share capacity in gigabytes (GB). Cloud Filestore defines 1 GB as 1024^3 bytes. - networks: - type: array - uniqItems: true - minItems: 1 - maxItems: 1 - description: | - Network configurations for the instance. 
- items: - type: object - additionalProperties: false - required: - - network - - modes - properties: - network: - type: string - description: | - The name of the Google Compute Engine VPC network to which the instance is connected. - modes: - type: array - uniqItems: true - default: [ "MODE_IPV4" ] - description: | - Internet protocol versions for which the instance has IP addresses assigned. For this version, - only MODE_IPV4 is supported. - items: - type: string - default: MODE_IPV4 - enum: - - MODE_IPV4 - reservedIpRange: - type: integer - description: | - A /29 CIDR block in one of the internal IP address ranges that identifies the range of IP addresses - reserved for this instance. For example, 10.0.0.0/29 or 192.168.0.0/29. The range you specify can't - overlap with either existing subnets or assigned IP address ranges for other Cloud Filestore instances in the selected VPC network. - -outputs: - name: - type: string - description: The resource name of the instance. - -documentation: - - templates/cloud_filestore/README.md - -examples: - - templates/cloud_filestore/examples/cloud_filestore.yaml diff --git a/dm/templates/cloud_filestore/examples/cloud_filestore.yaml b/dm/templates/cloud_filestore/examples/cloud_filestore.yaml deleted file mode 100644 index 847555372d6..00000000000 --- a/dm/templates/cloud_filestore/examples/cloud_filestore.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Example of the Cloud Filestore template usage. -# -# In this example, a Cloud Filestore with an HTTP trigger -# is created from the existing source. 
-# -# Replace the following placeholders with valid values: -# : a region where the filestore instance resides -# -imports: - - path: templates/cloud_filestore/cloud_filestore.py - name: cloud_filestore.py - -resources: - - name: test-filestore - type: cloud_filestore.py - properties: - location: - description: | - Test filestore instance - tier: PREMIUM - labels: - foo: bar - fileShares: - - name: test - capacityGb: 2560 - networks: - - network: default diff --git a/dm/templates/cloud_filestore/tests/integration/cloud_filestore.bats b/dm/templates/cloud_filestore/tests/integration/cloud_filestore.bats deleted file mode 100755 index f8451f579c0..00000000000 --- a/dm/templates/cloud_filestore/tests/integration/cloud_filestore.bats +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/cloud_filestore/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. 
- if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that test-filestore-${RAND} was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud filestore instances list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "test-filestore-${RAND}" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - run gcloud filestore instances list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-filestore-${RAND}" ]] -} diff --git a/dm/templates/cloud_filestore/tests/integration/cloud_filestore.yaml b/dm/templates/cloud_filestore/tests/integration/cloud_filestore.yaml deleted file mode 100644 index 11862e96e4e..00000000000 --- a/dm/templates/cloud_filestore/tests/integration/cloud_filestore.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Test of the Cloud Filestore template. -# -# Variables: -# RAND: A random string used by the testing suite. 
- -imports: - - path: templates/cloud_filestore/cloud_filestore.py - name: cloud_filestore.py - -resources: - - name: test-filestore-${RAND} - type: cloud_filestore.py - properties: - name: test-filestore-${RAND} - description: | - Test filestore instance - tier: PREMIUM - labels: - foo: bar - fileShares: - - name: test - capacityGb: 2560 - networks: - - network: default diff --git a/dm/templates/cloud_function/README.md b/dm/templates/cloud_function/README.md deleted file mode 100644 index c78e2d3625b..00000000000 --- a/dm/templates/cloud_function/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Cloud Function - -This template creates a Cloud Function. - -| Warning: As of January 15, 2019, newly created Functions are private-by-default and will require [appropriate IAM permissions](https://cloud.google.com/functions/docs/reference/iam/roles) to be invoked. | -|---| - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Enable the [Cloud Build API](https://cloud.google.com/cloud-build/docs/api/reference/rest/) -- Enable the [Cloud Functions API](https://cloud.google.com/functions/docs/reference/rest/) -- Make sure that your account has the Project Editor access level, or had been granted the [roles/deploymentmanager.editor](https://cloud.google.com/deployment-manager/docs/access-control#predefined_roles) IAM role -- Make sure that the [Google APIs service account](https://cloud.google.com/deployment-manager/docs/access-control#access_control_for_deployment_manager) has **default** permissions, or had been explicitly granted the [roles/cloudfunctions.developer](https://cloud.google.com/functions/docs/reference/iam/roles#standard-roles) IAM role -- Make sure that the [Cloud Functions service account](https://cloud.google.com/functions/docs/concepts/iam#cloud_functions_service_account) -has **default** permissions, or had been granted the 
[CloudFunctions.ServiceAgent](https://cloud.google.com/functions/docs/concepts/iam#cloud_functions_service_account) IAM role - -## Deployment - -### Resources - -- [cloudfunctions.v1.function](https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions) -- [storage.v1.bucket](https://cloud.google.com/storage/docs/json_api/v1/buckets) - -### Properties - -See the `properties` section in the schema file(s): -- [Cloud Function](cloud_function.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/cloud\_function.yaml](examples/cloud_function.yaml): - -```shell - cp templates/cloud_function/examples/cloud_function.yaml my_cloud_function.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_cloud_function.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_cloud_function.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Cloud Function](examples/cloud_function.yaml) - diff --git a/dm/templates/cloud_function/cloud_function.py b/dm/templates/cloud_function/cloud_function.py deleted file mode 100644 index 0510d42dfb0..00000000000 --- a/dm/templates/cloud_function/cloud_function.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" - Creates a Cloud Function from a local file system, a Cloud Storage bucket, - or a Cloud Source Repository, and then assigns an HTTPS, Storage, or Pub/Sub - trigger to that Cloud Function. -""" - -NO_RESOURCES_OR_OUTPUTS = [], [] - -def get_source_url_output(function_name, context): - """ Generates the Cloud Function output with a link to the source archive. - """ - - return { - 'name': 'sourceArchiveUrl', - 'value': '$(ref.{}.sourceArchiveUrl)'.format(function_name, context.env['name']) - } - -def append_cloud_storage_sources(function, project, context): - """ Adds source code from the Cloud Storage. """ - - properties = context.properties - upload_path = properties.get('sourceArchiveUrl') - - resources = [] - outputs = [get_source_url_output(function['name'], context)] - - if not upload_path: - msg = "sourceArchiveUrl must be provided" - raise Exception(msg) - - function['properties']['sourceArchiveUrl'] = upload_path - - return resources, outputs - -def append_cloud_repository_sources(function, context): - """ Adds the source code from the cloud repository. 
""" - - repo = context.properties.get('sourceRepository', { - 'url': context.properties.get('sourceRepositoryUrl') - }) - function['properties']['sourceRepository'] = repo - - name = function['name'] - output = { - 'name': 'sourceRepositoryUrl', - 'value': '$(ref.{}.sourceRepository.deployedUrl)'.format(context.env['name']) - } - - return [], [output] - -def append_source_code(function, project, context): - """ Append a reference to the Cloud Function's source code. """ - - properties = context.properties - - if 'sourceRepository' in properties or 'sourceRepositoryUrl' in properties: - return append_cloud_repository_sources(function, context) - - if 'sourceUploadUrl' in properties: - append_optional_property(function, properties, 'sourceUploadUrl') - return [], [] - - if 'sourceArchiveUrl' in properties or 'localUploadPath' in properties: - return append_cloud_storage_sources(function, project, context) - - raise ValueError('At least one of source properties must be provided') - -def append_trigger_topic(function, properties): - """ Appends the Pub/Sub event trigger. """ - - topic = properties['triggerTopic'] - - function['properties']['eventTrigger'] = { - 'eventType': 'providers/cloud.pubsub/eventTypes/topic.publish', - 'resource': topic - } - - return NO_RESOURCES_OR_OUTPUTS - -def append_trigger_http(function, context): - """ Appends the HTTPS trigger and returns the generated URL. """ - - function['properties']['httpsTrigger'] = {} - output = { - 'name': 'httpsTriggerUrl', - 'value': '$(ref.{}.httpsTrigger.url)'.format(context.env['name']) - } - - return [], [output] - -def append_trigger_storage(function, context): - """ Appends the Storage trigger. """ - - bucket = context.properties['triggerStorage']['bucketName'] - event = context.properties['triggerStorage']['event'] - - project_id = context.env['project'] - function['properties']['eventTrigger'] = { - 'eventType': 'google.storage.object.' 
+ event, - 'resource': 'projects/{}/buckets/{}'.format(project_id, bucket) - } - - return NO_RESOURCES_OR_OUTPUTS - -def append_trigger(function, context): - """ Adds the Trigger section and returns all the associated new - resources and outputs. - """ - - if 'triggerTopic' in context.properties: - return append_trigger_topic(function, context.properties) - elif 'triggerStorage' in context.properties: - return append_trigger_storage(function, context) - - return append_trigger_http(function, context) - -def append_optional_property(function, properties, prop_name): - """ If the property is set, it is added to the function body. """ - - val = properties.get(prop_name) - if val: - function['properties'][prop_name] = val - return - -def create_function_resource(context): - """ Creates the Cloud Function resource. """ - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - location = properties.get('location', properties.get('region')) - - function = { - # https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions - 'type': 'gcp-types/cloudfunctions-v1:projects.locations.functions', - 'name': context.env['name'], - 'properties': - { - 'parent': 'projects/{}/locations/{}'.format(project_id, location), - 'function': name, - # 'name': 'projects/{}/locations/{}/functions/{}'.format(project_id, location, name), - }, - } - - optional_properties = ['entryPoint', - 'labels', - 'environmentVariables', - 'timeout', - 'runtime', - 'maxInstances', - 'availableMemoryMb', - 'description'] - - for prop in optional_properties: - append_optional_property(function, properties, prop) - - trigger_resources, trigger_outputs = append_trigger(function, context) - code_resources, code_outputs = append_source_code(function, project_id, context) - - if code_resources: - function['metadata'] = { - 'dependsOn': [dep['name'] for dep in code_resources] - } - - return 
(trigger_resources + code_resources + [function], - trigger_outputs + code_outputs + [ - { - 'name': 'region', - 'value': context.properties['region'] - }, - { - 'name': 'name', - 'value': '$(ref.{}.name)'.format(context.env['name']) - } - ]) - -def generate_config(context): - """ Entry point for the deployment resources. """ - - resources, outputs = create_function_resource(context) - - return { - 'resources': resources, - 'outputs': outputs - } diff --git a/dm/templates/cloud_function/cloud_function.py.schema b/dm/templates/cloud_function/cloud_function.py.schema deleted file mode 100644 index fa1576407c3..00000000000 --- a/dm/templates/cloud_function/cloud_function.py.schema +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Cloud Function - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates a Cloud Function from a local file system, a Cloud Storage bucket, - or a cloud source repository, and then assigns HTTPS, Storage, or Pub/Sub - trigger to that Cloud Function. 
- - For more information on this resource: - https://cloud.google.com/functions/ - - APIs endpoints used by this template: - - gcp-types/cloudfunctions-v1:projects.locations.functions => - https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions - - gcp-types/cloudbuild-v1:cloudbuild.projects.builds.create => - https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds/create - - APIs that should be enabled (on the seed project as well): - - cloudfunctions.googleapis.com - - cloudbuild.googleapis.com - - Additionally, ID@cloudbuild.gserviceaccount.com service account of the seed project should have - storage.buckets.create on the target project. - -additionalProperties: false - -allOf: - - oneOf: - - required: - - region - - required: - - location - - oneOf: - - required: - - sourceRepository - - required: - - sourceRepositoryUrl - - required: - - sourceUploadUrl - - anyOf: - - required: - - sourceArchiveUrl - -properties: - name: - type: string - description: The function name. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the Cloud function. The - Google apps domain is prefixed if applicable. - region: - type: string - description: The region where the function is deployed. Deprecated, use "location" field - location: - type: string - description: The location where the function is deployed. - timeout: - type: string - description: The timeout for the function, in seconds; e.g., '120s'. - default: 60s - runtime: - name: string - description: | - The runtime in which the function is going to run. See - https://cloud.google.com/functions/docs/concepts/exec#runtimes. - enum: - - go111 - - nodejs6 # deprecated! - - nodejs8 - - nodejs10 - - python37 - default: nodejs10 - availableMemoryMb: - type: integer - description: The amount of memory available for the function, MB. 
- default: 256 - entryPoint: - type: string - description: | - The function name (as defined in the source code) to be executed. - Defaults to the resource name's suffix. - sourceUploadUrl: - type: string - description: | - The Google Cloud Storage signed URL used for source uploading, generated by - [google.cloud.functions.v1.GenerateUploadUrl][] - sourceArchiveUrl: - type: string - description: | - The URL of the archive containing the Cloud Function's source code - in Google Storage, starting with gs://, pointing to the zip archive which contains the function. - When used along with localUploadPath, this is the path to which the source code is uploaded. If the URL points - to a non-existing bucket, the bucket is created automatically. - sourceRepository: - type: object - additionalProperties: false - description: | - The source repository where a function is hosted. - required: - - url - properties: - url: - type: string - description: | - The URL pointing to the hosted repository where the function is defined. There are supported Cloud Source - Repository URLs in the following formats: - - To refer to a specific commit: - https://source.developers.google.com/projects/*/repos/*/revisions/*/paths/* - To refer to a moveable alias (branch): - https://source.developers.google.com/projects/*/repos/*/moveable-aliases/*/paths/* - In particular, to refer to HEAD use master moveable alias. - To refer to a specific fixed alias (tag): - https://source.developers.google.com/projects/*/repos/*/fixed-aliases/*/paths/* - - You may omit paths/* if you want to use the main directory. - sourceRepositoryUrl: - type: string - description: | - DEPRECATED, alias for sourceRepository->url - triggerTopic: - type: string - description: | - The Pub/Sub topic name (projects/PROJECT_NAME/topics/TOPIC_NAME) that - triggers the Cloud Function. If neither triggerTopic nor triggerStorage are - provided, the function is triggered by an HTTPS call. 
See more at - https://cloud.google.com/functions/docs/concepts/events-triggers#events. - triggerStorage: - type: object - additionalProperties: false - description: | - Configures the Cloud Storage trigger for the function. If neither triggerTopic - nor triggerStorage are provided, the function is triggered by an HTTPS call. - See more at https://cloud.google.com/functions/docs/concepts/events-triggers#events. - required: - - bucketName - - event - properties: - bucketName: - type: string - description: | - The name of the bucket triggering the event; e.g. my-bucket-name. - event: - type: string - description: | - The trigger event name. See more at - https://cloud.google.com/functions/docs/calling/storage#cloud_storage_event_types. - enum: - - finalize - - delete - - archive - - metadataUpdate - labels: - type: object - description: | - Map labels associated with this Cloud Function. - Example: - name: wrench - mass: 1.3kg - count: 3 - environmentVariables: - type: object - description: | - Map of environment variables that shall be available during function execution. - Example: - FOO: BAR - maxInstances: - type: number - description: | - The limit on the maximum number of function instances that may coexist at a given time. - This feature is currently in alpha, available only for whitelisted users. - -outputs: - region: - description: The region where the function is deployed. - type: string - name: - description: The function name. - type: string - httpsTriggerUrl: - description: For HTTPS-triggered functions, the trigger URL. - type: string - sourceRepositoryUrl: - description: | - For functions deployed from cloud repositories, - the repository URL. - type: string - sourceArchiveUrl: - description: For functions deployed from Cloud Storage, the bucket URL. 
- type: string - -documentation: - - templates/cloud_function/README.md - -examples: - - templates/cloud_function/examples/cloud_function.yaml - - templates/cloud_function/examples/cloud_function_upload.yaml diff --git a/dm/templates/cloud_function/examples/cloud_function.yaml b/dm/templates/cloud_function/examples/cloud_function.yaml deleted file mode 100644 index 54647223f1e..00000000000 --- a/dm/templates/cloud_function/examples/cloud_function.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Example of the Cloud Function template usage. -# -# In this example, a Cloud Function with an HTTP trigger -# is created from the existing source. -# -# Replace the following placeholders with valid values: -# : a region where the function resides -# : a URL, starting with gs://, -# pointing to an existing ZIP archive that contains the function -# : the name of the function in the archive -# -imports: - - path: templates/cloud_function/cloud_function.py - name: cloud_function.py - -resources: - - name: test-function - type: cloud_function.py - properties: - region: - entryPoint: - sourceArchiveUrl: diff --git a/dm/templates/cloud_function/function/helloGET.js b/dm/templates/cloud_function/function/helloGET.js deleted file mode 100644 index 55700836c15..00000000000 --- a/dm/templates/cloud_function/function/helloGET.js +++ /dev/null @@ -1,10 +0,0 @@ -/** - * Responds to any HTTP request with 'Hello World!'. - * - * @param {Object} req Cloud Function request context. - * @param {Object} res Cloud Function response context. 
- */ -exports.helloGET = function (req, res) { - res.status(200).send('Hello world!'); -}; - diff --git a/dm/templates/cloud_function/function/package.json b/dm/templates/cloud_function/function/package.json deleted file mode 100644 index 4a7caa6596e..00000000000 --- a/dm/templates/cloud_function/function/package.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "name": "function", - "version": "1.0.0", - "description": "A sample cloud function", - "main": "helloGET.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1" - }, - "author": "Google Inc.", - "license": "Apache2" -} diff --git a/dm/templates/cloud_function/tests/integration/cloud_function.bats b/dm/templates/cloud_function/tests/integration/cloud_function.bats deleted file mode 100755 index 706f492a40b..00000000000 --- a/dm/templates/cloud_function/tests/integration/cloud_function.bats +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/cloud_function/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - gcloud pubsub topics create topic-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud pubsub topics delete topic-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - gsutil rm -r gs://test-function-http-${RAND} - delete_config - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that cloud functions were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud functions list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "test-function-https-name-${RAND}" ]] - [[ "$output" =~ "test-function-storage-${RAND}" ]] - [[ "$output" =~ "test-function-topic-${RAND}" ]] -} - -@test "Verifying that test-function-https-name-${RAND} properties are set" { - run gcloud functions describe test-function-https-name-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "availableMemoryMb: 512" ]] - [[ "$output" =~ "timeout: 120s" ]] - [[ "$output" =~ "sourceArchiveUrl: gs://test-function-http-${RAND}/helloGET.zip" ]] -} - -@test "Verifying that test-function-https-name-${RAND} trigger is set" { - run gcloud functions describe test-function-https-name-${RAND} \ - --project 
"${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "url: https://us-central1-${CLOUD_FOUNDATION_PROJECT_ID}.cloudfunctions.net/test-function-https-name-${RAND}" ]] -} - -@test "Verifying that test-function-topic-${RAND} trigger is set" { - run gcloud functions describe test-function-topic-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "providers/cloud.pubsub/eventTypes/topic.publish" ]] - [[ "$output" =~ "projects/${CLOUD_FOUNDATION_PROJECT_ID}/topics/topic-${RAND}" ]] -} - -@test "Verifying that test-function-storage-${RAND} trigger is set" { - run gcloud functions describe test-function-storage-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "eventType: google.storage.object.finalize" ]] - [[ "$output" =~ "resource: projects/${CLOUD_FOUNDATION_PROJECT_ID}/buckets/test-function-http-${RAND}" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - run gcloud functions list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-function-https-name-${RAND}" ]] - [[ ! "$output" =~ "test-function-storage-${RAND}" ]] - [[ ! "$output" =~ "test-function-topic-${RAND}" ]] -} diff --git a/dm/templates/cloud_function/tests/integration/cloud_function.yaml b/dm/templates/cloud_function/tests/integration/cloud_function.yaml deleted file mode 100644 index 2f65ac119ac..00000000000 --- a/dm/templates/cloud_function/tests/integration/cloud_function.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Test of the Cloud Function template usage. -# -# Variables: -# RAND: a random string used by the testing suite. 
-# - -imports: - - path: templates/cloud_function/cloud_function.py - name: cloud_function.py - - path: templates/cloud_function/function/helloGET.js - - path: templates/cloud_function/function/package.json - -resources: - - - name: test-function-https-${RAND} - type: cloud_function.py - properties: - name: test-function-https-name-${RAND} - region: us-central1 - timeout: 120s - availableMemoryMb: 512 - entryPoint: helloGET - localUploadPath: templates/cloud_function/function - sourceArchiveUrl: gs://test-function-http-${RAND}/helloGET.zip - - - name: test-function-topic-${RAND} - type: cloud_function.py - properties: - region: us-central1 - entryPoint: helloGET - sourceArchiveUrl: $(ref.test-function-https-${RAND}.sourceArchiveUrl) - triggerTopic: projects/${CLOUD_FOUNDATION_PROJECT_ID}/topics/topic-${RAND} - - - name: test-function-storage-${RAND} - type: cloud_function.py - properties: - region: us-central1 - entryPoint: helloGET - sourceArchiveUrl: $(ref.test-function-https-${RAND}.sourceArchiveUrl) - triggerStorage: - bucketName: test-function-http-${RAND} - event: finalize - diff --git a/dm/templates/cloud_router/README.md b/dm/templates/cloud_router/README.md deleted file mode 100644 index edf5debaf84..00000000000 --- a/dm/templates/cloud_router/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# Cloud Router - -This template creates a Cloud Router. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [network](../network/README.md) -- Grant the [compute.networkAdmin](https://cloud.google.com/compute/docs/access/iam) IAM role to the project service account - -## Deployment - -### Resources - -- [compute.v1.router](https://cloud.google.com/compute/docs/reference/rest/v1/routers) - -### Properties - -See the `properties` section in the schema file(s): -- [Cloud Router](cloud_router.py.schema) - -### Usage - -1. 
Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -``` - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [cloud-foundation-toolkit/dm](../../) directory: - -``` - cd cloud-foundation-toolkit/dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/cloud_router.yaml](examples/cloud_router.yaml): - -``` - cp templates/cloud_router/examples/cloud_router.yaml my_cloud_router.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -``` - vim my_cloud_router.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -``` - gcloud deployment-manager deployments create \ - --config my_cloud_router.yaml -``` - -6. In case you need to delete your deployment: - -``` - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Cloud Router](examples/cloud_router.yaml) diff --git a/dm/templates/cloud_router/cloud_router.py b/dm/templates/cloud_router/cloud_router.py deleted file mode 100644 index 117ceed9939..00000000000 --- a/dm/templates/cloud_router/cloud_router.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Cloud Router. 
""" - - -def append_optional_property(res, properties, prop_name): - """ If the property is set, it is added to the resource. """ - - val = properties.get(prop_name) - if val: - res['properties'][prop_name] = val - return - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - bgp = properties.get('bgp', {'asn': properties.get('asn')}) - - router = { - 'name': context.env['name'], - # https://cloud.google.com/compute/docs/reference/rest/v1/routers - 'type': 'gcp-types/compute-v1:routers', - 'properties': - { - 'name': - name, - 'project': - project_id, - 'region': - properties['region'], - 'network': - properties.get('networkURL', generate_network_uri( - project_id, - properties.get('network', ''))), - } - } - - if properties.get('bgp'): - router['properties']['bgp'] = bgp - - optional_properties = [ - 'description', - 'bgpPeers', - 'interfaces', - 'nats', - ] - - for prop in optional_properties: - append_optional_property(router, properties, prop) - - return { - 'resources': [router], - 'outputs': - [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'selfLink', - 'value': '$(ref.' + context.env['name'] + '.selfLink)' - }, - { - 'name': - 'creationTimestamp', - 'value': - '$(ref.' + context.env['name'] + '.creationTimestamp)' - } - ] - } - - -def generate_network_uri(project_id, network): - """Format the network name as a network URI.""" - - return 'projects/{}/global/networks/{}'.format( - project_id, - network - ) diff --git a/dm/templates/cloud_router/cloud_router.py.schema b/dm/templates/cloud_router/cloud_router.py.schema deleted file mode 100644 index d1c42067ae9..00000000000 --- a/dm/templates/cloud_router/cloud_router.py.schema +++ /dev/null @@ -1,425 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Cloud Router - author: Sourced Group Inc. - version: 1.1.0 - description: | - Deploys a Cloud Router. - - For more information on this resource: - https://cloud.google.com/router/docs/ - - APIs endpoints used by this template: - - gcp-types/compute-v1:routers => - https://cloud.google.com/compute/docs/reference/rest/v1/routers - -additionalProperties: false - -allOf: - - required: - - region - - oneOf: - - required: - - networkURL - - required: - - network - - oneOf: - - required: - - asn - - required: - - bgp - - required: - - nats - -properties: - name: - type: string - description: | - Must comply with RFC1035. Specifically, the name must be 1-63 characters long and match - the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, - and all following characters must be a dash, lowercase letter, or digit, except the last character, - which cannot be a dash. - Resource name would be used if omitted. - description: - type: string - description: | - An optional description of this resource. Provide this property when you create the resource. - project: - type: string - description: | - The project ID of the project containing the Cloud Router instance. The - Google apps domain is prefixed if applicable. - region: - type: string - description: The URI of the region where the Cloud Router resides. 
- networkURL: - type: string - description: The URL (or URI) of the network to which the Cloud Router belongs. - network: - type: string - description: The name of the network to which the Cloud Router belongs (without project prefix). - bgp: - type: object - additionalProperties: false - description: | - BGP information specific to this router. - required: - - asn - properties: - asn: - type: integer - description: | - The local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, - either 16-bit or 32-bit. The value will be fixed for this router. - All VPN tunnels that link to this router will have the same - local ASN. - advertiseMode: - type: string - description: | - User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM. - enum: - - DEFAULT - - CUSTOM - advertisedGroups: - type: array - description: | - User-specified list of prefix groups to advertise in custom mode. This field can only be populated if - advertiseMode is CUSTOM and is advertised to all peers of the router. These groups will be advertised - in addition to any specified prefixes. Leave this field blank to advertise no custom groups. - uniqueItems: True - items: - type: string - enum: - - ALL_SUBNETS - advertisedIpRanges: - type: array - description: | - User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated - if advertiseMode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised - in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. - uniqueItems: True - items: - type: object - additionalProperties: false - required: - - range - properties: - range: - type: string - description: | - The IP range to advertise. The value must be a CIDR-formatted string. - description: - type: string - description: | - User-specified description for the IP range. 
- bgpPeers: - type: array - description: | - BGP information that must be configured into the routing stack to establish BGP peering. This information - must specify the peer ASN and either the interface name, IP address, or peer IP address. Please refer to RFC4273. - uniqueItems: True - items: - type: object - additionalProperties: false - required: - - name - - interfaceName - - ipAddress - - peerIpAddress - properties: - name: - type: string - description: | - Name of this BGP peer. The name must be 1-63 characters long and comply with RFC1035. - interfaceName: - type: string - description: | - Name of the interface the BGP peer is associated with. - ipAddress: - type: string - description: | - IP address of the interface inside Google Cloud Platform. Only IPv4 is supported. - peerIpAddress: - type: string - description: | - IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported. - peerAsn: - type: string - description: | - Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value. - advertisedRoutePriority: - type: string - description: | - The priority of routes advertised to this BGP peer. Where there is more than one matching - route of maximum length, the routes with the lowest priority value win. - advertiseMode: - type: string - description: | - User-specified flag to indicate which mode to use for advertisement. - advertisedGroups: - type: array - description: | - User-specified list of prefix groups to advertise in custom mode, which can take - one of the following options: - - - ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. - - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. - - ALL_PEER_VPC_SUBNETS: Advertises peer subnets of the router's VPC network. - Note that this field can only be populated if advertiseMode is CUSTOM and overrides the list - defined for the router (in the "bgp" message). 
These groups are advertised in addition - to any specified prefixes. Leave this field blank to advertise no custom groups. - uniqueItems: True - items: - type: string - enum: - - ALL_SUBNETS - - ALL_VPC_SUBNETS - - ALL_PEER_VPC_SUBNETS - advertisedIpRanges: - type: array - description: | - User-specified list of individual IP ranges to advertise in custom mode. This field can only - be populated if advertiseMode is CUSTOM and overrides the list defined for - the router (in the "bgp" message). These IP ranges are advertised in addition to any specified groups. - Leave this field blank to advertise no custom IP ranges. - uniqueItems: True - items: - type: object - additionalProperties: false - required: - - range - properties: - range: - type: string - description: | - The IP range to advertise. The value must be a CIDR-formatted string. - randescriptionge: - type: string - description: | - User-specified description for the IP range. - interfaces: - type: array - description: | - Router interfaces. Each interface requires either one linked resource, (for example, linkedVpnTunnel), - or IP address and IP address range (for example, ipRange), or both. - uniqueItems: True - items: - type: object - additionalProperties: false - required: - - name - oneOf: - - allOf: - - required: - - linkedVpnTunnel - - not: - required: - - linkedInterconnectAttachment - - allOf: - - required: - - linkedInterconnectAttachment - - not: - required: - - linkedVpnTunnel - properties: - name: - type: string - description: | - Name of this interface entry. The name must be 1-63 characters long and comply with RFC1035. - linkedVpnTunnel: - type: string - description: | - URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have - one linked resource, which can be either a VPN tunnel or an Interconnect attachment. - linkedInterconnectAttachment: - type: string - description: | - URI of the linked Interconnect attachment. 
It must be in the same region as the router. Each interface - can have one linked resource, which can be either be a VPN tunnel or an Interconnect attachment. - ipRange: - type: string - description: | - IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. - The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address - as it represents the IP address of the interface. - nats: - type: array - description: | - A list of NAT services created in this router. - uniqueItems: True - items: - type: object - additionalProperties: false - required: - - name - - sourceSubnetworkIpRangesToNat - - natIpAllocateOption - oneOf: - - allOf: - - properties: - sourceSubnetworkIpRangesToNat: - enum: - - ALL_SUBNETWORKS_ALL_IP_RANGES - - not: - required: - - subnetworks - - allOf: - - properties: - sourceSubnetworkIpRangesToNat: - enum: - - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES - - not: - required: - - subnetworks - - allOf: - - properties: - sourceSubnetworkIpRangesToNat: - enum: - - LIST_OF_SUBNETWORKS - - required: - - subnetworks - properties: - name: - type: string - description: | - Unique name of this Nat service. The name must be 1-63 characters long and comply with RFC1035. - sourceSubnetworkIpRangesToNat: - type: string - description: | - Specify the Nat option, which can take one of the following values: - - - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) - The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. 
Note that if this field contains - ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any - other Router.Nat section in any Router for this network in this region. - enum: - - ALL_SUBNETWORKS_ALL_IP_RANGES - - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES - - LIST_OF_SUBNETWORKS - subnetworks: - type: array - description: | - A list of Subnetwork resources whose traffic should be translated by NAT Gateway. It is used only - when LIST_OF_SUBNETWORKS is selected for the SubnetworkIpRangeToNatOption above. - uniqueItems: True - items: - type: object - additionalProperties: false - required: - - name - properties: - name: - type: string - description: | - URL for the subnetwork resource that will use NAT. - sourceIpRangesToNat: - type: array - description: | - Specify the options for NAT ranges in the Subnetwork. All options of a single value - are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with - multiple values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] Default: [ALL_IP_RANGES] - uniqueItems: True - items: - type: string - secondaryIpRangeNames: - type: array - description: | - A list of the secondary ranges of the Subnetwork that are allowed to use NAT. - This can be populated only if "LIST_OF_SECONDARY_IP_RANGES" is - one of the values in sourceIpRangesToNat. - uniqueItems: True - items: - type: string - natIps: - type: array - description: | - A list of URLs of the IP resources used for this Nat service. These IP addresses must - be valid static external IP addresses assigned to the project. - uniqueItems: True - items: - type: string - natIpAllocateOption: - type: string - description: | - Specify the NatIpAllocateOption, which can take one of the following values: - - - MANUAL_ONLY: Uses only Nat IP addresses provided by customers. - When there are not enough specified Nat IPs, the Nat service fails for new VMs. 
- - AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. - When choosing AUTO_ONLY, then natIp should be empty. - enum: - - MANUAL_ONLY - - AUTO_ONLY - minPortsPerVm: - type: integer - description: | - Minimum number of ports allocated to a VM from this NAT config. If not set, a default - number of ports is allocated to a VM. This is rounded up to the nearest power of 2. - For example, if the value of this field is 50, at least 64 ports are allocated to a VM. - udpIdleTimeoutSec: - type: integer - description: | - Timeout (in seconds) for UDP connections. Defaults to 30s if not set. - icmpIdleTimeoutSec: - type: integer - description: | - Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. - tcpEstablishedIdleTimeoutSec: - type: integer - description: | - Timeout (in seconds) for TCP established connections. Defaults to 1200s if not set. - tcpTransitoryIdleTimeoutSec: - type: integer - description: | - Timeout (in seconds) for TCP transitory connections. Defaults to 30s if not set. - logConfig: - type: object - additionalProperties: false - description: | - Configure logging on this NAT. - properties: - enable: - type: boolean - description: | - Indicates whether or not to export logs. This is false by default. - filter: - type: string - description: | - Specifies the desired filtering of logs on this NAT. If unspecified, logs are exported - for all connections handled by this NAT. - asn: - type: integer - description: | - DEPRECATED. Alias for bgp->asn - -outputs: - name: - type: string - description: The name of the Cloud Router resource. - selfLink: - type: string - description: The URI (SelfLink) of the Cloud Router resource. - creationTimestamp: - type: string - description: Creation timestamp in RFC3339 text format. 
- -documentation: - - templates/cloud_router/README.md - -examples: - - templates/cloud_router/examples/cloud_router.yaml diff --git a/dm/templates/cloud_router/examples/cloud_nat_router.yaml b/dm/templates/cloud_router/examples/cloud_nat_router.yaml deleted file mode 100644 index 0d208cfd81f..00000000000 --- a/dm/templates/cloud_router/examples/cloud_nat_router.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Example of the Cloud Router template usage to create CloudNAT -# -# Replace the following placeholders with valid values: -# : the name the network to which the router belongs -# : subnetwork that belongs to the network, whose -# traffic should be translated by NAT Gateway. - -imports: - - path: templates/cloud_router/cloud_router.py - name: cloud_router.py - -resources: - - name: test-cloud-nat-router - type: cloud_router.py - properties: - name: cloud-nat-router - network: - region: us-east1 - nats: - - name: cloud-nat - sourceSubnetworkIpRangesToNat: LIST_OF_SUBNETWORKS - natIpAllocateOption: AUTO_ONLY - subnetworks: - - name: diff --git a/dm/templates/cloud_router/examples/cloud_router.yaml b/dm/templates/cloud_router/examples/cloud_router.yaml deleted file mode 100644 index 2eb0d8aa3e1..00000000000 --- a/dm/templates/cloud_router/examples/cloud_router.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Example of the Cloud Router template usage. 
-# -# Replace the following placeholders with valid values: -# : the name the network to which the router belongs -# - -imports: - - path: templates/cloud_router/cloud_router.py - name: cloud_router.py - -resources: - - name: test-cloud-router - type: cloud_router.py - properties: - name: cloud-router - network: - region: us-east1 - asn: 65001 diff --git a/dm/templates/cloud_router/tests/integration/cloud_router.bats b/dm/templates/cloud_router/tests/integration/cloud_router.bats deleted file mode 100644 index ef5ddd4fb7a..00000000000 --- a/dm/templates/cloud_router/tests/integration/cloud_router.bats +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export REGION="us-east1" - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/cloud_router/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. 
- if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - gcloud compute networks create network-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --description "integration test ${RAND}" \ - --subnet-mode auto -q - create_config - fi - - # Per-test setup steps. - } - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud compute networks delete network-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that routers were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute routers list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "cloud-router-${RAND}" ]] - [[ "$output" =~ "cloud-router-nat-${RAND}" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - run gcloud compute routers list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "cloud-router-${RAND}" ]] -} diff --git a/dm/templates/cloud_router/tests/integration/cloud_router.yaml b/dm/templates/cloud_router/tests/integration/cloud_router.yaml deleted file mode 100644 index e1219c9f040..00000000000 --- a/dm/templates/cloud_router/tests/integration/cloud_router.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Test of the Cloud Router template usage. -# -# Variables: -# RAND: a random string used by the testing suite. 
-# - -imports: - - path: templates/cloud_router/cloud_router.py - name: cloud_router.py - -resources: - - name: cloud-router-${RAND} - type: cloud_router.py - properties: - name: cloud-router-${RAND} - network: network-${RAND} - region: ${REGION} - asn: 65001 - - name: cloud-router-nat-${RAND} - type: cloud_router.py - properties: - name: cloud-router-nat-${RAND} - network: network-${RAND} - region: ${REGION} - nats: - - name: cloud-nat-test-${RAND} - sourceSubnetworkIpRangesToNat: LIST_OF_SUBNETWORKS - natIpAllocateOption: AUTO_ONLY - subnetworks: - - name: projects/${CLOUD_FOUNDATION_PROJECT_ID}/regions/${REGION}/subnetworks/network-${RAND} diff --git a/dm/templates/cloud_router/tests/schemas/invalid_additional_options.yaml b/dm/templates/cloud_router/tests/schemas/invalid_additional_options.yaml deleted file mode 100644 index 41c7594ae79..00000000000 --- a/dm/templates/cloud_router/tests/schemas/invalid_additional_options.yaml +++ /dev/null @@ -1,4 +0,0 @@ -network: asd -region: us-east1 -asn: 65001 -foo: bar diff --git a/dm/templates/cloud_router/tests/schemas/valid_basic.yaml b/dm/templates/cloud_router/tests/schemas/valid_basic.yaml deleted file mode 100644 index c4e9529f7c0..00000000000 --- a/dm/templates/cloud_router/tests/schemas/valid_basic.yaml +++ /dev/null @@ -1,5 +0,0 @@ -network: asd -region: us-east1 -asn: 65001 -name: foo -project: foo diff --git a/dm/templates/cloud_spanner/README.md b/dm/templates/cloud_spanner/README.md deleted file mode 100644 index a7c0c93004f..00000000000 --- a/dm/templates/cloud_spanner/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# Cloud Spanner - -This template creates a Cloud Spanner instance cluster and database. 
- -## Prerequisites -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [spanner.admin](https://cloud.google.com/spanner/docs/iam) IAM role to the project service account - -## Deployment - -### Resources - -- [gcp-types/spanner-v1](https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instances) - -### Properties - -See the `properties` section in the schema file(s): -- [Cloud Spanner](cloud_spanner.py.schema) - - -#### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../../cloud-foundation) directory - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment, in this case [examples/cloud_spanner.yaml](examples/cloud_spanner.yaml) - -```shell - cp templates/cloud_spanner/examples/cloud_spanner.yaml my_cloud_spanner.yaml -``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - -```shell - vim my_cloud_spanner.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment as described below, replacing - with your with your own deployment name - -```shell - gcloud deployment-manager deployments create \ - --config my_cloud_spanner.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Cloud Spanner](examples/cloud_spanner.yaml) diff --git a/dm/templates/cloud_spanner/cloud_spanner.py b/dm/templates/cloud_spanner/cloud_spanner.py deleted file mode 100644 index 2f7367af33c..00000000000 --- a/dm/templates/cloud_spanner/cloud_spanner.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Cloud Spanner instance and database. """ - - -def append_optional_property(res, properties, prop_name): - """ If the property is set, it is added to the resource. """ - - val = properties.get(prop_name) - if val: - res['properties'][prop_name] = val - return - -def get_spanner_instance_id(project_id, base_name): - """ Generate the instance URL """ - - return "projects/{}/instances/{}".format(project_id, base_name) - - -def get_spanner_instance_config(project_id, config): - """ Generate the instance config URL """ - - return "projects/{}/instanceConfigs/{}".format(project_id, config) - - -def generate_config(context): - """ - Generates the config gcloud needs to create a Cloud Spanner instance. - Input: context - generated by gcloud when loading the input config file. - Output: dictionary with key resource names - this contains all the - information gcloud needs to create a spanner instance, - databases, and permissions. 
- """ - - resources_list = [] - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - instance_id = get_spanner_instance_id(project_id, name) - instance_config = get_spanner_instance_config( - project_id, - context.properties['instanceConfig'] - ) - - resource = { - 'name': name, - # https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instances - 'type': 'gcp-types/spanner-v1:projects.instances', - 'properties': - { - 'instanceId': name, - 'parent': 'projects/{}'.format(project_id), - 'instance': - { - 'name': instance_id, - 'config': instance_config, - 'nodeCount': context.properties['nodeCount'], - 'displayName': context.properties['displayName'] - } - } - } - - optional_properties = [ - 'labels', - ] - for prop in optional_properties: - append_optional_property(resource, properties, prop) - resources_list.append(resource) - - out = {} - for database in context.properties.get("databases", []): - database_resource_name = "{}{}{}".format( - instance_id, - "/databases/", - database['name'] - ) - database_resource = { - 'name': database_resource_name, - # https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instances.databases - 'type': 'gcp-types/spanner-v1:projects.instances.databases', - 'properties': - { - 'parent': instance_id, - 'databaseId': database['name'] - }, - 'metadata': { - 'dependsOn': [name] - } - } - resources_list.append(database_resource) - - out[database_resource_name] = { - 'state': '$(ref.' + database_resource_name + '.state)' - } - - outputs = [ - { - 'name': 'state', - 'value': '$(ref.' 
+ name + '.state)' - }, - { - 'name': 'databases', - 'value': out - } - ] - - return {'resources': resources_list, 'outputs': outputs} diff --git a/dm/templates/cloud_spanner/cloud_spanner.py.schema b/dm/templates/cloud_spanner/cloud_spanner.py.schema deleted file mode 100644 index 5809db6e15a..00000000000 --- a/dm/templates/cloud_spanner/cloud_spanner.py.schema +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Cloud Spanner - author: Sourced Group Inc. - version: 1.0.0 - description: Creates a Cloud Spanner instance and database. - -additionalProperties: false - -required: - - displayName - - nodeCount - - instanceConfig - -properties: - name: - type: string - description: | - A unique identifier for the instance, which cannot be changed after the instance is created.Values are - of the form projects//instances/[a-z][-a-z0-9]*[a-z0-9]. The final segment of the name must be - between 2 and 64 characters in length. - This does not include the project ID. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the Cloud Spanner instance. The - Google apps domain is prefixed if applicable. - displayName: - type: string - description: The cluster display name in GCP Console. - nodeCount: - type: integer - minimum: 1 - description: The number of instances allocated to your node. 
- instanceConfig: - type: string - description: The cluster location. - enum: - - eur3 - - nam-eur-asia1 - - nam3 - - nam6 - - regional-asia-east1 - - regional-asia-east2 - - regional-asia-northeast1 - - regional-asia-northeast2 - - regional-asia-south1 - - regional-asia-southeast1 - - regional-australia-southeast1 - - regional-europe-north1 - - regional-europe-west1 - - regional-europe-west2 - - regional-europe-west4 - - regional-europe-west6 - - regional-northamerica-northeast1 - - regional-us-central1 - - regional-us-east1 - - regional-us-east4 - - regional-us-west1 - - regional-us-west2 - labels: - type: object - description: | - Map labels associated with this Cloud spanner instance. - Example: - name: wrench - mass: 1.3kg - count: 3 - databases: - type: array - description: A list of databases created under the instance cluster. - items: - type: object - additionalProperties: false - required: - - name - properties: - name: - type: string - description: The name of the database created under the instance cluster. - -outputs: - name: - type: string - description: The name of the cloud spanner instance. - state: - type: string - description: | - The current cloud spanner instance state. For more information: - - https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instances#State - databases: - type: array - description: | - Array of database details. For example, the output can be referenced - as: `$(ref..databases..state)` - items: - description: The name of the address resource. - patternProperties: - ".*": - type: object - additionalProperties: false - description: Details for an address resource. - properties: - state: - type: string - description: | - The current database state. 
For more information: - - https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instances.databases#Database - -documentation: - - templates/cloud_spanner/README.md - -examples: - - templates/cloud_spanner/examples/cloud_spanner.yaml diff --git a/dm/templates/cloud_spanner/examples/cloud_spanner.yaml b/dm/templates/cloud_spanner/examples/cloud_spanner.yaml deleted file mode 100644 index 9d0f81eccd5..00000000000 --- a/dm/templates/cloud_spanner/examples/cloud_spanner.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Example of the Cloud Spanner template usage. - -imports: - - path: templates/cloud_spanner/cloud_spanner.py - name: cloud_spanner.py - -resources: - - name: myspannercluster - type: cloud_spanner.py - properties: - displayName: "Spanner Cluster 1" - nodeCount: 2 - instanceConfig: nam3 - databases: - - name: "spannerdb1" - \ No newline at end of file diff --git a/dm/templates/cloud_spanner/tests/integration/cloud_spanner.bats b/dm/templates/cloud_spanner/tests/integration/cloud_spanner.bats deleted file mode 100755 index 616d7b4e4bc..00000000000 --- a/dm/templates/cloud_spanner/tests/integration/cloud_spanner.bats +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -## Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -export PROJECT_NUMBER=$(gcloud projects list | grep "${CLOUD_FOUNDATION_PROJECT_ID}" | awk {'print $NF'}) - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/cloud_spanner/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi -} - -function teardown() { - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi -} - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying that Spanner cluster was created as part of ${DEPLOYMENT_NAME}" { - run gcloud spanner instances list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "test-myspannercluster-${RAND}" ]] -} - -@test "Verifying that Spanner cluster IAM was created as part of ${DEPLOYMENT_NAME}" { - run gcloud spanner instances get-iam-policy test-myspannercluster-"${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${PROJECT_NUMBER}@cloudservices.gserviceaccount.com" ]] -} - -@test "Verifying that Spanner DB was created as part of ${DEPLOYMENT_NAME}" { - run gcloud spanner databases list --instance test-myspannercluster-"${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "spannerdb1" ]] -} - -@test "Verifying that Spanner DB IAM was created as part of ${DEPLOYMENT_NAME}" { - run gcloud spanner databases get-iam-policy spannerdb1 --instance test-myspannercluster-"${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ 
"$status" -eq 0 ]] - [[ "$output" =~ "role: roles/spanner.databaseAdmin" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] - - run gcloud spanner instances list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "test-myspannercluster-${RAND}" ]] -} diff --git a/dm/templates/cloud_spanner/tests/integration/cloud_spanner.yaml b/dm/templates/cloud_spanner/tests/integration/cloud_spanner.yaml deleted file mode 100644 index 81782b83ea3..00000000000 --- a/dm/templates/cloud_spanner/tests/integration/cloud_spanner.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Test of the Cloud Spanner template. -# -# Variables: -# RAND: A random string used by the testing suite. -# - -imports: -- path: templates/cloud_spanner/cloud_spanner.py - name: cloud_spanner.py - -resources: - - name: test-myspannercluster-${RAND} - type: cloud_spanner.py - properties: - displayName: "Spanner Cluster 1" - nodeCount: 2 - instanceConfig: nam3 - databases: - - name: "spannerdb1" diff --git a/dm/templates/cloud_spanner/tests/schemas/invalid_additional_options.yaml b/dm/templates/cloud_spanner/tests/schemas/invalid_additional_options.yaml deleted file mode 100644 index 4f26d755b10..00000000000 --- a/dm/templates/cloud_spanner/tests/schemas/invalid_additional_options.yaml +++ /dev/null @@ -1,4 +0,0 @@ -displayName: "Spanner Cluster 1" -nodeCount: 2 -instanceConfig: nam3 -foo: bar diff --git a/dm/templates/cloud_spanner/tests/schemas/valid_basic.yaml b/dm/templates/cloud_spanner/tests/schemas/valid_basic.yaml deleted file mode 100644 index feba6ca8c69..00000000000 --- a/dm/templates/cloud_spanner/tests/schemas/valid_basic.yaml +++ /dev/null @@ -1,5 +0,0 @@ -displayName: "Spanner Cluster 1" -nodeCount: 2 -instanceConfig: nam3 -name: foo -project: foo diff --git a/dm/templates/cloud_spanner/tests/schemas/valid_complex.yaml 
b/dm/templates/cloud_spanner/tests/schemas/valid_complex.yaml deleted file mode 100644 index 7dc0c9de919..00000000000 --- a/dm/templates/cloud_spanner/tests/schemas/valid_complex.yaml +++ /dev/null @@ -1,5 +0,0 @@ -displayName: "Spanner Cluster 1" -nodeCount: 2 -instanceConfig: nam3 -databases: - - name: "spannerdb1" \ No newline at end of file diff --git a/dm/templates/cloud_sql/README.md b/dm/templates/cloud_sql/README.md deleted file mode 100644 index 82b9da2d8a0..00000000000 --- a/dm/templates/cloud_sql/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# Cloud SQL - -This template creates a Cloud SQL instance with databases and users. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Enable the [Cloud SQL API](https://cloud.google.com/sql/docs/mysql/admin-api/) -- Enable the [Cloud SQL Admin API](https://cloud.google.com/sql/docs/mysql/admin-api/) -- Grant the [roles/cloudsql.admin](https://cloud.google.com/sql/docs/mysql/project-access-control) - IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [sqladmin.v1beta4.instance](https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances) -- [sqladmin.v1beta4.database](https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases) -- [sqladmin.v1beta4.user](https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/users) - -### Properties - -See the `properties` section in the schema file(s): - -- [Cloud SQL](cloud_sql.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - - ```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - ``` - -2. Go to the [dm](../../) directory: - - ```shell - cd dm - ``` - -3. 
Copy the example DM config to be used as a model for the deployment; in this - case, [examples/cloud\_sql.yaml](examples/cloud_sql.yaml): - - ```shell - cp templates/cloud_sql/examples/cloud_sql.yaml my_cloud_sql.yaml - ``` - -4. Change the values in the config file to match your specific GCP setup (for - properties, refer to the schema files listed above): - - ```shell - vim my_cloud_sql.yaml # <== change values to match your GCP setup - ``` - -5. Create your deployment (replace \ with the relevant - deployment name): - - ```shell - gcloud deployment-manager deployments create \ - --config my_cloud_sql.yaml - ``` - - To deploy with CFT: - - ```shell - cft apply my_cloud_sql.yaml - ``` - -6. In case you need to delete your deployment: - - ```shell - gcloud deployment-manager deployments delete - ``` - - To delete deployment with CFT: - - ```shell - cft delete my_cloud_sql.yaml - ``` - -`Notes:` After a Cloud SQL instance is deleted, its name cannot be reused for -up to 7 days. - -## Examples - -- [Cloud SQL](examples/cloud_sql.yaml) -- [Cloud SQL with Read Replica](examples/cloud_sql_read_replica.yaml) -- [Cloud SQL Postgres](examples/cloud_sql_postgres.yaml) -- [Cloud SQL MSSQL Server](examples/cloud_sql_mssql.yaml) -- [Cloud SQL Private Networking](examples/cloud_sql_private_network.yaml) diff --git a/dm/templates/cloud_sql/cloud_sql.py b/dm/templates/cloud_sql/cloud_sql.py deleted file mode 100644 index 452181e5299..00000000000 --- a/dm/templates/cloud_sql/cloud_sql.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Cloud SQL Instance with databases and users. """ - -import collections - -DMBundle = collections.namedtuple('DMBundle', 'resource outputs') - - -def set_optional_property(receiver, source, property_name): - """ If set, copies the given property value from one object to another. """ - - if property_name in source: - receiver[property_name] = source[property_name] - - -def get_instance(res_name, project_id, properties): - """ Creates a Cloud SQL instance. """ - - name = res_name - instance_properties = { - 'region': properties['region'], - 'project': project_id, - 'name': name - } - - optional_properties = [ - 'databaseVersion', - 'failoverReplica', - 'instanceType', - 'masterInstanceName', - 'maxDiskSize', - 'onPremisesConfiguration', - 'replicaConfiguration', - 'serverCaCert', - 'serviceAccountEmailAddress', - 'settings', - 'rootPassword', - ] - - for prop in optional_properties: - set_optional_property(instance_properties, properties, prop) - - instance = { - 'name': name, - # https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances - 'type': 'gcp-types/sqladmin-v1beta4:instances', - 'properties': instance_properties - } - - if 'dependsOn' in properties: - instance['metadata'] = {'dependsOn': properties['dependsOn']} - - output_fields = [ - 'name', - 'selfLink', - 'backendType', - ] - - if 'onPremisesConfiguration' not in properties: - output_fields.extend(['gceZone','connectionName','serviceAccountEmailAddress']) - - outputs = [{ - 'name': i, - 'value': '$(ref.{}.{})'.format(name, i) - } for i in output_fields] - - 
# Regrettably, 'ipAddress' is a special snowflake. 'ipAddresses' is a list - # of objects, and DM doesn't seem to let you extract child properties from - # outputs of imported templates. If we want to use the actual IP address of - # the instantiated database in a template that uses this template, we need - # to navigate to the relevant child value here. - if 'onPremisesConfiguration' not in properties: - outputs += [{ - 'name': 'ipAddress', - 'value': '$(ref.{}.ipAddresses[0].ipAddress)'.format(name), - }] - - return DMBundle(instance, outputs) - - -def get_database(instance_name, project_id, properties, res_name): - """ Creates a Cloud SQL database. """ - - name = properties['name'] - res_name = '{}-{}'.format(res_name, name) - - db_properties = { - 'name': name, - 'project': project_id, - 'instance': instance_name - } - - optional_properties = [ - 'charset', - 'collation', - 'instance', - ] - - for prop in optional_properties: - set_optional_property(db_properties, properties, prop) - - database = { - 'name': res_name, - # https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases - 'type': 'gcp-types/sqladmin-v1beta4:databases', - 'properties': db_properties - } - - outputs = [ - { - 'name': 'name', - 'value': '$(ref.{}.name)'.format(res_name) - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(res_name) - } - ] - - return DMBundle(database, outputs) - - -def get_databases(instance_name, project_id, properties, res_name): - """ Creates Cloud SQL databases for the given instance. """ - - dbs = properties.get('databases') - if dbs: - return [get_database(instance_name, project_id, db, res_name) for db in dbs] - - return [] - - -def get_user(instance_name, project_id, properties, res_name): - """ Creates a Cloud SQL user. 
""" - - name = properties['name'] - res_name = '{}-user-{}'.format(res_name, name) - if 'host' in properties: - res_name = '{}-{}'.format(res_name, properties['host'].replace('cloudsqlproxy~', 'proxy_').replace('.', '_')) - - user_properties = { - 'name': name, - 'project': project_id, - 'instance': instance_name, - } - - for prop in ['host', 'password']: - set_optional_property(user_properties, properties, prop) - - user = { - 'name': res_name, - # https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/users - 'type': 'gcp-types/sqladmin-v1beta4:users', - 'properties': user_properties - } - - outputs = [{'name': 'name', 'value': res_name}] - - return DMBundle(user, outputs) - - -def get_users(instance_name, project_id, properties, res_name): - """ Creates Cloud SQL users for the given instance. """ - - users = properties.get('users') - if users: - return [get_user(instance_name, project_id, user, res_name) for user in users] - - return [] - - -def create_sequentially(resources): - """ - Sets up the resources' metadata in such a way that the resources are - created sequentially. - """ - - if resources and len(resources) > 1: - previous = resources[0] - for current in resources[1:]: - previous_name = previous['name'] - current['metadata'] = {'dependsOn': [previous_name]} - previous = current - - -def consolidate_outputs(bundles, prefix): - """ - Consolidates values of multiple outputs into one array for the new - output. - """ - - res = {} - outputs = [output for bundle in bundles for output in bundle.outputs] - for output in outputs: - output_name = output['name'] - new_name = prefix + output_name[0].upper() + output_name[1:] + 's' - if not new_name in res: - res[new_name] = {'name': new_name, 'value': []} - res[new_name]['value'].append(output['value']) - - # We sort the output by key to guarantee deterministic results. This makes - # DM's Python3 compatibility checker less grumpy. 
- return [value for _, value in sorted(res.items())] - - -def get_resource_names_output(resources): - """ - Creates the output dict with the names of all resources to be created. - """ - - names = [resource['name'] for resource in resources] - - return {'name': 'resources', 'value': names} - - -def generate_config(context): - """ Creates the Cloud SQL instance, databases, and user. """ - - properties = context.properties - res_name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - instance = get_instance(res_name, project_id, properties) - instance_name = instance.outputs[0]['value'] # 'name' output - - users = get_users(instance_name, project_id, properties, res_name) - dbs = get_databases(instance_name, project_id, properties, res_name) - - children = [user.resource for user in users] + [db.resource for db in dbs] - create_sequentially(children) - - user_outputs = consolidate_outputs(users, 'user') - db_outputs = consolidate_outputs(dbs, 'database') - - resources = [instance.resource] + children - outputs = [get_resource_names_output(resources)] + instance.outputs + \ - db_outputs + user_outputs - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/cloud_sql/cloud_sql.py.schema b/dm/templates/cloud_sql/cloud_sql.py.schema deleted file mode 100644 index 19bb7319602..00000000000 --- a/dm/templates/cloud_sql/cloud_sql.py.schema +++ /dev/null @@ -1,576 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Cloud SQL - author: Sourced Group Inc. - version: 1.1.1 - description: | - Supports creation of a Cloud SQL instance with database and user resources. - For more information, see https://cloud.google.com/sql/docs/. - -additionalProperties: false - -required: - - region - -properties: - name: - type: string - description: | - The name of the Cloud SQL instance. This does not include the project ID. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the Cloud SQL instance. The - Google apps domain is prefixed if applicable. - databaseVersion: - type: string - description: | - The database engine type and version. - The databaseVersion field can not be changed after instance creation. - MySQL Second Generation instances: MYSQL_8_0, MYSQL_5_7 (default) or MYSQL_5_6. - PostgreSQL instances: POSTGRES_9_6 (default), or POSTGRES_10, or POSTGRES_11 Beta, or POSTGRES_12. - SQL Server instances: SQLSERVER_2017_STANDARD (default), SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, or SQLSERVER_2017_WEB. - enum: - - MYSQL_5_6 - - MYSQL_5_7 - - MYSQL_8_0 - - POSTGRES_9_6 - - POSTGRES_10 - - POSTGRES_11 - - POSTGRES_12 - - SQLSERVER_2017_STANDARD - - SQLSERVER_2017_ENTERPRISE - - SQLSERVER_2017_EXPRESS - - SQLSERVER_2017_WEB - failoverReplica: - type: object - additionalProperties: false - description: | - The name and status of the failover replica. Applicable only to Second - Generation instances. - required: - - name - properties: - name: - type: string - description: | - The name of the failover replica. If specified at instance creation, - a failover replica is created for the instance. The name does not - include the project ID. Applicable only to Second Generation - instances. - instanceType: - type: string - description: | - The instance type. 
Can be one of the following: - CLOUD_SQL_INSTANCE - a Cloud SQL instance that is not replicating from a - master - ON_PREMISES_INSTANCE - an instance running on the customer's premises - READ_REPLICA_INSTANCE - a Cloud SQL instance configured as a read-replica - enum: - - CLOUD_SQL_INSTANCE - - ON_PREMISES_INSTANCE - - READ_REPLICA_INSTANCE - masterInstanceName: - type: string - description: | - The name of the instance to act as master in the replication setup. - maxDiskSize: - type: number - description: The maximum disk size of the instance in bytes. - onPremisesConfiguration: - type: object - additionalProperties: false - description: | - Configuration specific to on-premises instances. - requires: - - hostPort - - kind - properties: - hostPort: - type: string - description: | - The host and port of the on-premises instance in the host:port - format. - kind: - type: string - const: "sql#onPremisesConfiguration" - description: | - This is always sql#onPremisesConfiguration. - replicaConfiguration: - type: object - additionalProperties: false - description: | - Configuration specific to failover replicas and read replicas. - properties: - failoverTarget: - type: boolean - description: | - Defines whether the replica is the failover target. If True, the - replica is designated as a failover replica. In case the master - instance fails, the replica instance will be made the new master - instance. Only one replica can be specified as a failover target, - and that replica must be in a zone different from that of the master - instance. - mysqlReplicaConfiguration: - type: object - additionalProperties: false - description: | - MySQL-specific configuration when replicating from an MySQL - on-premises master. Replication configuration information such as the - username, password, certificates, and keys, are not stored in the - instance metadata. 
The configuration information is used only to set - up the replication connection, and is stored by MySQL in a file named - master.info in the data directory. - properties: - caCertificate: - type: string - description: | - The PEM representation of the trusted CA's x509 certificate. - clientCertificate: - type: string - description: | - The PEM representation of the slave's x509 certificate. - clientKey: - type: string - description: | - The PEM representation of the slave's private key. The - corresponding public key is encoded in the client's certificate. - connectRetryInterval: - type: number - description: | - The number of seconds to wait between connect retries. MySQL's - default is 60 seconds. - dumpFilePath: - type: string - description: | - The path to the SQL dump file in Google Cloud Storage from which - the slave instance is to be created. The URI is in the - gs://bucketName/fileName form. Compressed gzip files (.gz) are - also supported. Dumps should have the binlog coordinates from - which replication should begin. This can be accomplished by - setting --master-data to 1 when using mysqldump. - masterHeartbeatPeriod: - type: number - description: | - The interval between replication heartbeats (in milliseconds). - password: - type: string - description: | - The password for the replication connection. - sslCipher: - type: string - description: | - The list of permissible ciphers to use for SSL encryption. - username: - type: string - description: | - The username for the replication connection. - verifyServerCertificate: - type: boolean - description: | - Defines whether or not to check the master's Common Name value in - the certificate that the master sends during the SSL handshake. - serverCaCert: - type: object - description: SSL configuration. - serviceAccountEmailAddress: - type: string - description: | - The service account email address assigned to the instance. This property is applicable only - to Second Generation instances. 
- rootPassword: - type: string - description: MSSQL root password - settings: - type: object - additionalProperties: false - description: User settings. - required: - - tier - properties: - activationPolicy: - type: string - description: | - Defines when the instance is activated; applicable only when the - instance state is RUNNABLE. Valid values: - ALWAYS: The instance is on, and remains so even in the absence of - connection requests. - NEVER: The instance is off; it is not activated, even if a - connection request arrives. - ON_DEMAND: The instance responds to incoming requests, and turns - itself off when not in use. Instances with PER_USE pricing turn off - after 15 minutes of inactivity. Instances with PER_PACKAGE pricing - turn off after 12 hours of inactivity. For First Generation - instances only. - enum: - - ALWAYS - - NEVER - - ON_DEMAND - authorizedGaeApplications: - type: array - description: | - App Engine app IDs that can access the instance. For First Generation - instances only. - items: - type: string - availabilityType: - type: string - description: | - The availability type (PostgreSQL instances only). Potential values: - ZONAL: The instance serves data from only one zone. Outages in that - zone affect data accessibility. - REGIONAL: The instance can serve data from more than one zone in a - region (it is highly available). - enum: - - ZONAL - - REGIONAL - backupConfiguration: - type: object - additionalProperties: false - description: The daily backup configuration for the instance. - properties: - binaryLogEnabled: - type: boolean - description: | - Defines whether binary log is enabled. If backup configuration is - disabled, binary log must be disabled as well. - enabled: - type: boolean - description: Defines whether this configuration is enabled. - replicationLogArchivingEnabled: - type: boolean - description: Reserved for future use. 
- startTime: - type: string - description: | - The start time for the daily backup configuration in UTC timezone - in the 24 hour format (HH:MM). - crashSafeReplicationEnabled: - type: boolean - description: | - Defines whether database flags for crash-safe replication are - enabled. Applicable to First Generation instances. Configuration - specific to read replica instances. - dataDiskSizeGb: - type: number - description: | - The size of the data disk, in GB. The data disk size minimum is 10GB. - Not used for First Generation instances. - dataDiskType: - type: string - default: PD_SSD - description: | - The type of the data disk. Not used for First Generation instances. - enum: - - PD_SSD - - PD_HDD - databaseFlags: - type: array - description: | - Database flags passed to the instance at startup. - items: - type: object - additionalProperties: false - properties: - name: - type: string - description: | - The name of the flag. These flags are passed at instance - startup, and they include both server options and system - variables for MySQL. Flags should be specified with - underscores, not hyphens. For more information, see - Configuring Database Flags in the Cloud SQL documentation. - value: - type: string - description: | - The flag value. Booleans must be set to "on" for True - and to "off" for False. This field must be omitted if the flag - does not take a value. - databaseReplicationEnabled: - type: boolean - description: | - Indicates whether replication is enabled or not. Configuration - specific to read replica instances. Writable. - ipConfiguration: - type: object - additionalProperties: false - description: | - Settings for IP Management. This allows to enable or disable the - instance IP and to define which external networks can connect to the - instance. The IPv4 address cannot be disabled for Second Generation - instances. 
- properties: - authorizedNetworks: - type: array - description: | - The list of external networks that are allowed to connect to the - instance using its IP. In CIDR notation, also known as 'slash' - notation (e.g., 192.168.100.0/24). Writable. - items: - type: object - additionalProperties: false - properties: - expirationTime: - type: string - description: | - The time when the access control entry expires in the RFC - 3339 format; for example, 2012-11-15T16:19:00.094Z. - name: - type: string - description: | - An optional label to identify the entry. - value: - type: string - description: | - The whitelisted value for the access control list. For - example, to grant access to a client from an external IP - (IPv4 or IPv6) address or subnet, use that address or - subnet here. - ipv4Enabled: - type: boolean - description: | - Defines wether the instance should be assigned an IP address. - privateNetwork: - type: string - description: | - The resource link for the VPC network from which the Cloud SQL - instance is accessible for private IP. For example, - /projects/myProject/global/networks/default. The value can be - updated but it cannot be removed after it has been set. - requireSsl: - type: boolean - description: | - Defines whether SSL connections over IP must be enforced. - locationPreference: - type: object - additionalProperties: false - description: | - Location preference settings. This allows the instance to be - located as near as possible to either an App Engine app or Compute - Engine zone for better performance. App Engine co-location is only - applicable to First Generation instances. - properties: - followGaeApplication: - type: string - description: | - The App Engine application to follow. The application must be in - the same region as the Cloud SQL instance. - zone: - type: string - description: | - The preferred Compute Engine zone (e.g., us-central1-a, - us-central1-b, etc.). 
- maintenanceWindow: - type: object - additionalProperties: false - description: | - The maintenance window for the instance. Specifies when the instance - can be restarted for maintenance purposes. Not used for First - Generation instances. - properties: - day: - type: number - description: The day of the week (1-7, starting Monday). - hour: - type: integer - description: The hour of the day (0 to 23). - updateTrack: - type: string - description: | - The maintenance timing: canary (Earlier) or stable (Later). - pricingPlan: - type: string - description: | - The pricing plan for the instance: PER_USE or PACKAGE. Only PER_USE - is supported for Second Generation instances. - enum: - - PER_USE - - PACKAGE - replicationType: - type: string - description: | - The type of replication the instance uses: ASYNCHRONOUS or SYNCHRONOUS. - enum: - - ASYNCHRONOUS - - SYNCHRONOUS - settingsVersion: - type: number - description: | - The version of the instance settings. This is a required field for - the update method to make sure that concurrent updates are handled - properly. During update, use the most recent settingsVersion value - for the instance and do not try to update that value. - storageAutoResize: - type: boolean - default: true - description: | - Defines whether the storage size can be increased automatically. - Not used for First Generation instances. - storageAutoResizeLimit: - type: number - default: 0 - description: | - The maximum size to which the storage capacity can be automatically - increased. The default value is 0, which specifies that there is no - limit. Not used for First Generation instances. - tier: - type: string - description: | - The tier (or machine type) for the instance. For example, - db-n1-standard-1 (MySQL instances) or db-custom-1-3840 (PostgreSQL - instances). For MySQL instances, determines whether the instance is - First or Second Generation. 
- userLabels: - type: object - description: | - User-provided labels - a dictionary where each label is a single - key-value pair. - region: - type: string - description: | - The geographical region. Defaults to us-central or us-central1 depending - on the instance type (First Generation or Second Generation/PostgreSQL). - For a complete list of valid values, see Instance Locations. The region - cannot be changed after instance creation. - See https://cloud.google.com/sql/docs/mysql/locations - enum: - - northamerica-northeast1 - - us-central - - us-central1 - - us-east1 - - us-east4 - - us-west1 - - us-west2 - - southamerica-east1 - - europe-north1 - - europe-west1 - - europe-west2 - - europe-west3 - - europe-west4 - - europe-west6 - - asia-east1 - - asia-east2 - - asia-northeast1 - - asia-northeast2 - - asia-south1 - - asia-southeast1 - - australia-southeast1 - databases: - type: array - description: SQL Databases to create in the new instance. - required: - - name - items: - type: object - additionalProperties: false - properties: - name: - type: string - description: The database name. - charset: - type: string - default: utf8 - description: The character set to use for the database. - collation: - type: string - description: The collation to use for the database. - users: - type: array - description: Cloud SQL users to create in the new instance. - items: - type: object - additionalProperties: false - required: - - name - properties: - name: - type: string - description: The name of the user in the Cloud SQL instance. - host: - type: string - description: | - The name of the host from which the user can connect. - The host name cannot be updated after insertion. - password: - type: string - description: The user password. - dependsOn: - type: array - description: | - The list of the resources that must be created before this instance can - be created. 
Because Cloud SQL resources must be created sequentially, - this property can be used to postpone the instance creation. For example: - dependsOn: - - resource-name-1 - - resource-name-2 - or if it't neccessary to wait for another instance of cloud_sql template - to finish when, e.g. building a read replica: - dependsOn: $(ref.master-instance-of-cloud-sql.resources) - items: - type: string - description: The resource name. - -outputs: - name: - type: string - description: The resource name. - selfLink: - type: string - description: | - The URL (SelfLink) of the Cloud SQL instance resource. - gceZone: - type: string - description: | - The Compute Engine zone the instance is currently serving from. - This value could be different from the zone that was specified when - the instance was created if the instance has failed over to its - secondary zone. - connectionName: - type: string - description: | - The connection name of the Cloud SQL instance (used in connection - strings). - backendType: - type: string - description: Database generation. - ipAddress: - type: string - description: | - The first IP address assigned to the instance. - userNames: - type: array - description: The names of the created users. - databaseNames: - type: array - description: The names of the created databases. - databaseSelfLinks: - type: array - description: | - The URLs (SelfLinks) of the Cloud SQL database resources. - resources: - type: array - description: | - Names of the resources the template creates. Because Cloud SQL - usually requires resources to be created sequentially, this output - can be used as synchronization context. 
- -documentation: - - templates/cloud_sql/README.md - -examples: - - templates/cloud_sql/examples/cloud_sql.yaml - - templates/cloud_sql/examples/cloud_sql_read_replica.yaml diff --git a/dm/templates/cloud_sql/examples/cloud_sql.yaml b/dm/templates/cloud_sql/examples/cloud_sql.yaml deleted file mode 100644 index 678b50d9380..00000000000 --- a/dm/templates/cloud_sql/examples/cloud_sql.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Example of the Cloud SQL template usage. - -imports: - - path: templates/cloud_sql/cloud_sql.py - name: cloud_sql.py - -resources: - - name: cloud-sql-instance - type: cloud_sql.py - properties: - region: us-central1 - settings: - tier: db-n1-standard-1 - backupConfiguration: - startTime: '02:00' - enabled: true - binaryLogEnabled: true - locationPreference: - zone: us-central1-c - users: - - name: user-1 - host: 10.1.1.1 - - name: user-2 - host: 10.1.1.2 - databases: - - name: db-1 - - name: db-2 diff --git a/dm/templates/cloud_sql/examples/cloud_sql_mssql.yaml b/dm/templates/cloud_sql/examples/cloud_sql_mssql.yaml deleted file mode 100644 index d0fe8c405be..00000000000 --- a/dm/templates/cloud_sql/examples/cloud_sql_mssql.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Example of the Cloud SQL template usage. -# Note that MSSQL Server instance uses rootPassword. 
- -imports: - - path: templates/cloud_sql/cloud_sql.py - name: cloud_sql.py - -resources: - - name: cloud-sql-mssql-instance - type: cloud_sql.py - properties: - region: us-central1 - databaseVersion: SQLSERVER_2017_STANDARD - instanceType: CLOUD_SQL_INSTANCE - rootPassword: changeit - settings: - tier: db-custom-2-3840 - backupConfiguration: - startTime: '02:00' - enabled: true - locationPreference: - zone: us-central1-a - users: - - name: user-1 - host: 10.1.1.1 - - name: user-2 - host: 10.1.1.2 - databases: - - name: db-1 - - name: db-2 diff --git a/dm/templates/cloud_sql/examples/cloud_sql_postgres.yaml b/dm/templates/cloud_sql/examples/cloud_sql_postgres.yaml deleted file mode 100644 index ffac1269726..00000000000 --- a/dm/templates/cloud_sql/examples/cloud_sql_postgres.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Example of the Cloud SQL template usage. -# Note that Postgres instances use different instance types then MySQL instances - -imports: - - path: templates/cloud_sql/cloud_sql.py - name: cloud_sql.py - -resources: - - name: cloud-sql-postgres-instance - type: cloud_sql.py - properties: - region: us-central1 - databaseVersion: POSTGRES_9_6 - instanceType: CLOUD_SQL_INSTANCE - settings: - tier: db-f1-micro - backupConfiguration: - startTime: '02:00' - enabled: true - locationPreference: - zone: us-central1-c - users: - - name: user-1 - password: dummy2 - - name: user-2 - password: dummy3 - databases: - - name: db-1 - - name: db-2 diff --git a/dm/templates/cloud_sql/examples/cloud_sql_private_network.yaml b/dm/templates/cloud_sql/examples/cloud_sql_private_network.yaml deleted file mode 100644 index 372c35010d9..00000000000 --- a/dm/templates/cloud_sql/examples/cloud_sql_private_network.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Example of the Cloud SQL template usage. -# Replace 'your-vpc-network-name' with the name of the vpc -# in which the Cloud SQL instance will be deployed. 
- -imports: - - path: templates/cloud_sql/cloud_sql.py - name: cloud_sql.py - -resources: - - name: cloud-sql-instance - type: cloud_sql.py - properties: - region: us-central1 - settings: - tier: db-n1-standard-1 - backupConfiguration: - startTime: '02:00' - enabled: true - binaryLogEnabled: true - locationPreference: - zone: us-central1-c - ipConfiguration: - ipv4Enabled: false - privateNetwork: 'your-vpc-network-name' - users: - - name: user-1 - host: 10.1.1.1 - - name: user-2 - host: 10.1.1.2 - databases: - - name: db-1 - - name: db-2 diff --git a/dm/templates/cloud_sql/examples/cloud_sql_read_replica.yaml b/dm/templates/cloud_sql/examples/cloud_sql_read_replica.yaml deleted file mode 100644 index bc0ce55f4d0..00000000000 --- a/dm/templates/cloud_sql/examples/cloud_sql_read_replica.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Example of the Cloud SQL template usage. -# -# This example creates a Cloud SQL instance with a read replica. - -imports: - - path: templates/cloud_sql/cloud_sql.py - name: cloud_sql.py - -resources: - - name: cloud-sql-master-instance - type: cloud_sql.py - properties: - region: us-central1 - settings: - tier: db-n1-standard-1 - backupConfiguration: - startTime: '02:00' - enabled: true - binaryLogEnabled: true - locationPreference: - zone: us-central1-c - users: - - name: user-1 - host: 10.1.1.1 - databases: - - name: db-1 - - - name: cloud-sql-read-replica-instance - type: cloud_sql.py - properties: - region: us-central1 - instanceType: READ_REPLICA_INSTANCE - settings: - tier: db-n1-standard-1 - locationPreference: - zone: us-central1-a - masterInstanceName: $(ref.cloud-sql-master-instance.name) - # Wait until all the resources required by the master instance had been - # created. 
- dependsOn: - - $(ref.cloud-sql-master-instance.resources) diff --git a/dm/templates/cloud_sql/tests/integration/cloud_sql.bats b/dm/templates/cloud_sql/tests/integration/cloud_sql.bats deleted file mode 100755 index 3bf1c6e7cd2..00000000000 --- a/dm/templates/cloud_sql/tests/integration/cloud_sql.bats +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export MASTER_INSTANCE_NAME="cloud-sql-master-instance-${RAND}" - export VERSION="MYSQL_5_6" - export MASTER_INSTANCE_TIER="db-n1-standard-1" - export MASTER_ZONE="us-central1-c" - export REPLICA_ZONE="us-central1-a" - export REGION="us-central1" - export REPLICA_INSTANCE_NAME="cloud-sql-replica-instance-${RAND}" - export REPLICA_INSTANCE_TIER="db-n1-standard-2" - export REPLICA_INSTANCE_TYPE="READ_REPLICA_INSTANCE" - export BACKUP_START_TIME="02:00" - export BACKUP_ENABLED="true" - export BACKUP_BL_ENABLED="true" - export USER1_NAME="user-1" - export USER1_HOST="10.1.1.1" - export USER2_NAME="user-2" - export USER2_HOST="10.1.1.2" - export DB1="db-1" - export DB2="db-2" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying that both instances were created" { - run gcloud sql instances list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${MASTER_INSTANCE_NAME}" ]] - [[ "$output" =~ "${REPLICA_INSTANCE_NAME}" ]] -} - -@test "Verifying master instance" { - run gcloud sql instances describe ${MASTER_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${VERSION}" ]] - [[ "$output" =~ "${MASTER_INSTANCE_TIER}" ]] - [[ "$output" =~ "instanceType: CLOUD_SQL_INSTANCE" ]] - [[ "$output" =~ "region: ${REGION}" ]] - [[ "$output" =~ "${MASTER_ZONE}" ]] -} - -@test "Verifying master replica list" { - run gcloud sql instances describe ${MASTER_INSTANCE_NAME} \ - --format="yaml(replicaNames)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${REPLICA_INSTANCE_NAME}" ]] -} - -@test "Verifying master database list" { - run gcloud sql databases list --instance ${MASTER_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${DB1}" ]] - [[ "$output" =~ "${DB2}" ]] -} - -@test "Verifying master user list" { - run gcloud sql users list --instance ${MASTER_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${USER1_NAME}" ]] - [[ "$output" =~ "${USER2_NAME}" ]] - [[ "$output" =~ 
"${USER1_HOST}" ]] - [[ "$output" =~ "${USER2_HOST}" ]] -} - -@test "Verifying master backup settings" { - run gcloud sql instances describe ${MASTER_INSTANCE_NAME} \ - --format="yaml(settings.backupConfiguration)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "binaryLogEnabled: ${BACKUP_BL_ENABLED}" ]] - [[ "$output" =~ "enabled: ${BACKUP_ENABLED}" ]] - [[ "$output" =~ "startTime: ${BACKUP_START_TIME}" ]] -} - -@test "Verifying replica instance" { - run gcloud sql instances describe ${REPLICA_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${VERSION}" ]] - [[ "$output" =~ "${REPLICA_INSTANCE_TIER}" ]] - [[ "$output" =~ "${REPLICA_INSTANCE_TYPE}" ]] - [[ "$output" =~ "region: ${REGION}" ]] - [[ "$output" =~ "${REPLICA_ZONE}" ]] - [[ "$output" =~ "${MASTER_INSTANCE_NAME}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/cloud_sql/tests/integration/cloud_sql.yaml b/dm/templates/cloud_sql/tests/integration/cloud_sql.yaml deleted file mode 100644 index 52f5c04d1ca..00000000000 --- a/dm/templates/cloud_sql/tests/integration/cloud_sql.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Test of the Cloud SQL template. 
- -imports: - - path: templates/cloud_sql/cloud_sql.py - name: cloud_sql.py - -resources: - - name: ${MASTER_INSTANCE_NAME} - type: cloud_sql.py - properties: - databaseVersion: ${VERSION} - region: ${REGION} - settings: - tier: ${MASTER_INSTANCE_TIER} - backupConfiguration: - startTime: ${BACKUP_START_TIME} - enabled: ${BACKUP_ENABLED} - binaryLogEnabled: ${BACKUP_BL_ENABLED} - locationPreference: - zone: ${MASTER_ZONE} - users: - - name: ${USER1_NAME} - host: ${USER1_HOST} - - name: ${USER2_NAME} - host: ${USER2_HOST} - databases: - - name: ${DB1} - - name: ${DB2} - - - name: ${REPLICA_INSTANCE_NAME} - type: cloud_sql.py - properties: - databaseVersion: ${VERSION} - region: ${REGION} - instanceType: ${REPLICA_INSTANCE_TYPE} - settings: - tier: ${REPLICA_INSTANCE_TIER} - locationPreference: - zone: ${REPLICA_ZONE} - masterInstanceName: $(ref.${MASTER_INSTANCE_NAME}.name) - dependsOn: $(ref.${MASTER_INSTANCE_NAME}.resources) diff --git a/dm/templates/cloud_sql/tests/integration/cloud_sql_mssql.bats b/dm/templates/cloud_sql/tests/integration/cloud_sql_mssql.bats deleted file mode 100644 index fb7292a1bc0..00000000000 --- a/dm/templates/cloud_sql/tests/integration/cloud_sql_mssql.bats +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export MASTER_INSTANCE_NAME="cloud-sql-mssql-instance-${RAND}" - export VERSION="SQLSERVER_2017_STANDARD" - export MASTER_INSTANCE_TIER="db-custom-2-3840" - export MASTER_ZONE="us-central1-c" - export REGION="us-central1" - export BACKUP_START_TIME="02:00" - export BACKUP_ENABLED="true" - export BACKUP_BL_ENABLED="true" - export ROOT_PASSWORD="changeit" - export USER1_NAME="user-1" - export USER1_HOST="10.1.1.1" - export USER2_NAME="user-2" - export USER2_HOST="10.1.1.2" - export DB1="db-1" - export DB2="db-2" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying that both instances were created" { - run gcloud sql instances list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${MASTER_INSTANCE_NAME}" ]] -} - -@test "Verifying master instance" { - run gcloud sql instances describe ${MASTER_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${VERSION}" ]] - [[ "$output" =~ "${MASTER_INSTANCE_TIER}" ]] - [[ "$output" =~ "instanceType: CLOUD_SQL_INSTANCE" ]] - [[ "$output" =~ "region: ${REGION}" ]] - [[ "$output" =~ "${MASTER_ZONE}" ]] -} - -@test "Verifying master database list" { - run gcloud sql databases list --instance ${MASTER_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${DB1}" ]] - [[ "$output" =~ "${DB2}" ]] -} - -@test "Verifying master user list" { - run gcloud sql users list --instance ${MASTER_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${USER1_NAME}" ]] - [[ "$output" =~ "${USER2_NAME}" ]] -} - -@test "Verifying master backup settings" { - run gcloud sql instances describe ${MASTER_INSTANCE_NAME} \ - --format="yaml(settings.backupConfiguration)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "enabled: ${BACKUP_ENABLED}" ]] - [[ "$output" =~ "startTime: ${BACKUP_START_TIME}" 
]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/cloud_sql/tests/integration/cloud_sql_mssql.yaml b/dm/templates/cloud_sql/tests/integration/cloud_sql_mssql.yaml deleted file mode 100644 index 2e06ce7a3fa..00000000000 --- a/dm/templates/cloud_sql/tests/integration/cloud_sql_mssql.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Test of the Cloud SQL template. - -imports: - - path: templates/cloud_sql/cloud_sql.py - name: cloud_sql.py - -resources: - - name: ${MASTER_INSTANCE_NAME} - type: cloud_sql.py - properties: - databaseVersion: ${VERSION} - region: ${REGION} - rootPassword: ${ROOT_PASSWORD} - settings: - tier: ${MASTER_INSTANCE_TIER} - backupConfiguration: - startTime: ${BACKUP_START_TIME} - enabled: ${BACKUP_ENABLED} - locationPreference: - zone: ${MASTER_ZONE} - users: - - name: ${USER1_NAME} - host: ${USER1_HOST} - - name: ${USER2_NAME} - host: ${USER2_HOST} - databases: - - name: ${DB1} - - name: ${DB2} diff --git a/dm/templates/cloud_sql/tests/integration/cloud_sql_postgres.bats b/dm/templates/cloud_sql/tests/integration/cloud_sql_postgres.bats deleted file mode 100755 index cec0140378e..00000000000 --- a/dm/templates/cloud_sql/tests/integration/cloud_sql_postgres.bats +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. 
-if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export MASTER_INSTANCE_NAME="cloud-sql-master-instance-${RAND}" - export VERSION="POSTGRES_9_6" - export MASTER_INSTANCE_TIER="db-f1-micro" - export MASTER_ZONE="us-central1-c" - export REPLICA_ZONE="us-central1-a" - export REGION="us-central1" - export REPLICA_INSTANCE_NAME="cloud-sql-replica-instance-${RAND}" - export REPLICA_INSTANCE_TIER="db-f1-micro" - export REPLICA_INSTANCE_TYPE="READ_REPLICA_INSTANCE" - export BACKUP_START_TIME="02:00" - export BACKUP_ENABLED="true" - export BACKUP_BL_ENABLED="true" - export USER1_NAME="user-1" - export USER1_HOST="10.1.1.1" - export USER1_PASS="dummy1" - export USER2_NAME="user-2" - export USER2_HOST="10.1.1.2" - export USER2_PASS="dummy2" - export DB1="db-1" - export DB2="db-2" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying that both instances were created" { - run gcloud sql instances list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${MASTER_INSTANCE_NAME}" ]] - [[ "$output" =~ "${REPLICA_INSTANCE_NAME}" ]] -} - -@test "Verifying master instance" { - run gcloud sql instances describe ${MASTER_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${VERSION}" ]] - [[ "$output" =~ "${MASTER_INSTANCE_TIER}" ]] - [[ "$output" =~ "instanceType: CLOUD_SQL_INSTANCE" ]] - [[ "$output" =~ "region: ${REGION}" ]] - [[ "$output" =~ "${MASTER_ZONE}" ]] -} - -@test "Verifying master replica list" { - run gcloud sql instances describe ${MASTER_INSTANCE_NAME} \ - --format="yaml(replicaNames)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${REPLICA_INSTANCE_NAME}" ]] -} - -@test "Verifying master database list" { - run gcloud sql databases list --instance ${MASTER_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${DB1}" ]] - [[ "$output" =~ "${DB2}" ]] -} - -@test "Verifying master user list" { - run gcloud sql users list --instance ${MASTER_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${USER1_NAME}" ]] - [[ "$output" =~ "${USER2_NAME}" ]] -} - -@test "Verifying 
master backup settings" { - run gcloud sql instances describe ${MASTER_INSTANCE_NAME} \ - --format="yaml(settings.backupConfiguration)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "enabled: ${BACKUP_ENABLED}" ]] - [[ "$output" =~ "startTime: ${BACKUP_START_TIME}" ]] -} - -@test "Verifying replica instance" { - run gcloud sql instances describe ${REPLICA_INSTANCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${VERSION}" ]] - [[ "$output" =~ "${REPLICA_INSTANCE_TIER}" ]] - [[ "$output" =~ "${REPLICA_INSTANCE_TYPE}" ]] - [[ "$output" =~ "region: ${REGION}" ]] - [[ "$output" =~ "${REPLICA_ZONE}" ]] - [[ "$output" =~ "${MASTER_INSTANCE_NAME}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/cloud_sql/tests/integration/cloud_sql_postgres.yaml b/dm/templates/cloud_sql/tests/integration/cloud_sql_postgres.yaml deleted file mode 100644 index b719513c915..00000000000 --- a/dm/templates/cloud_sql/tests/integration/cloud_sql_postgres.yaml +++ /dev/null @@ -1,41 +0,0 @@ - -# Test of the Cloud SQL template. 
- -imports: - - path: templates/cloud_sql/cloud_sql.py - name: cloud_sql.py - -resources: - - name: ${MASTER_INSTANCE_NAME} - type: cloud_sql.py - properties: - databaseVersion: ${VERSION} - region: ${REGION} - settings: - tier: ${MASTER_INSTANCE_TIER} - backupConfiguration: - startTime: ${BACKUP_START_TIME} - enabled: ${BACKUP_ENABLED} - locationPreference: - zone: ${MASTER_ZONE} - users: - - name: ${USER1_NAME} - password: ${USER1_PASS} - - name: ${USER2_NAME} - password: ${USER2_PASS} - databases: - - name: ${DB1} - - name: ${DB2} - - - name: ${REPLICA_INSTANCE_NAME} - type: cloud_sql.py - properties: - databaseVersion: ${VERSION} - region: ${REGION} - instanceType: ${REPLICA_INSTANCE_TYPE} - settings: - tier: ${REPLICA_INSTANCE_TIER} - locationPreference: - zone: ${REPLICA_ZONE} - masterInstanceName: $(ref.${MASTER_INSTANCE_NAME}.name) - dependsOn: $(ref.${MASTER_INSTANCE_NAME}.resources) diff --git a/dm/templates/cloud_sql/tests/schemas/invalid_additional_options.yaml b/dm/templates/cloud_sql/tests/schemas/invalid_additional_options.yaml deleted file mode 100644 index 7c8d53098d7..00000000000 --- a/dm/templates/cloud_sql/tests/schemas/invalid_additional_options.yaml +++ /dev/null @@ -1,4 +0,0 @@ -region: us-central1 -settings: - tier: db-n1-standard-1 -foo: bar diff --git a/dm/templates/cloud_sql/tests/schemas/invalid_additional_options_nested.yaml b/dm/templates/cloud_sql/tests/schemas/invalid_additional_options_nested.yaml deleted file mode 100644 index 25f56ace31c..00000000000 --- a/dm/templates/cloud_sql/tests/schemas/invalid_additional_options_nested.yaml +++ /dev/null @@ -1,4 +0,0 @@ -region: us-central1 -settings: - tier: db-n1-standard-1 - foo: bar diff --git a/dm/templates/cloud_sql/tests/schemas/invalid_missing_region.yaml b/dm/templates/cloud_sql/tests/schemas/invalid_missing_region.yaml deleted file mode 100644 index f169277a46f..00000000000 --- a/dm/templates/cloud_sql/tests/schemas/invalid_missing_region.yaml +++ /dev/null @@ -1,2 +0,0 @@ 
-settings: - tier: db-n1-standard-1 diff --git a/dm/templates/cloud_sql/tests/schemas/invalid_missing_tier.yaml b/dm/templates/cloud_sql/tests/schemas/invalid_missing_tier.yaml deleted file mode 100644 index aa3e8a6672c..00000000000 --- a/dm/templates/cloud_sql/tests/schemas/invalid_missing_tier.yaml +++ /dev/null @@ -1 +0,0 @@ -region: us-central1 diff --git a/dm/templates/cloud_sql/tests/schemas/valid_basic.yaml b/dm/templates/cloud_sql/tests/schemas/valid_basic.yaml deleted file mode 100644 index 5693db2b0fb..00000000000 --- a/dm/templates/cloud_sql/tests/schemas/valid_basic.yaml +++ /dev/null @@ -1,5 +0,0 @@ -region: us-central1 -settings: - tier: db-n1-standard-1 -name: foo -project: foo diff --git a/dm/templates/cloud_sql/tests/schemas/valid_complex.yaml b/dm/templates/cloud_sql/tests/schemas/valid_complex.yaml deleted file mode 100644 index f71571802a5..00000000000 --- a/dm/templates/cloud_sql/tests/schemas/valid_complex.yaml +++ /dev/null @@ -1,17 +0,0 @@ -region: us-central1 -settings: - tier: db-n1-standard-1 - backupConfiguration: - startTime: '02:00' - enabled: true - binaryLogEnabled: true - locationPreference: - zone: us-central1-c -users: - - name: user-1 - host: 10.1.1.1 - - name: user-2 - host: 10.1.1.2 -databases: - - name: db-1 - - name: db-2 diff --git a/dm/templates/cloud_tasks/README.md b/dm/templates/cloud_tasks/README.md deleted file mode 100644 index a87572075c3..00000000000 --- a/dm/templates/cloud_tasks/README.md +++ /dev/null @@ -1,99 +0,0 @@ -# Cloud Tasks - -This set of two templates creates a Cloud Task and a Cloud Task Queue. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Install gcloud **beta** components: - - ```(shell) - gcloud components update - gcloud components install beta - ``` - -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Enable the [Cloud Tasks API](https://console.cloud.google.com/apis/library/cloudtasks.googleapis.com) - from the Google Cloud console -- Grant the [appengine.applications.get](https://cloud.google.com/appengine/docs/admin-api/access-control) - IAM permission to the Deployment Manager service account -- NOTE: Cloud Tasks requires an App Engine application. To run the integration tests - please ensure that an App Engine application exists. An App Engine app can be created using the [App Engine Template](../app_engine) or by running the [App Engine Template integration tests](../app_engine/tests/integration) - -## Deployment - -### Resources - -- [projects.locations.queues](https://cloud.google.com/tasks/docs/reference/rest/v2beta3/projects.locations.queues) -- [projects.locations.queues.tasks](https://cloud.google.com/tasks/docs/reference/rest/v2beta3/projects.locations.queues.tasks) -- [Task Queues](https://cloud.google.com/appengine/docs/standard/python/taskqueue/) -- [CloudTasks v2beta3 Descriptor URL](https://cloudtasks.googleapis.com/$discovery/rest?version=v2beta3) - -### Properties - -See the `properties` section in the schema file(s): - -- [CloudTasks Queue schema](queue.py.schema) -- [CloudTasks Task schema](task.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - - ```(shell) - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - ``` - -2. Go to the [dm](../../) directory - - ```(shell) - cd dm - ``` - -3. Create a custom type-provider named `cloudtasks` - - ```(shell) - cp templates/cloud_tasks/examples/create_typeprovider.sh . 
- chmod u+x create_typeprovider.sh - ./create_typeprovider.sh - ``` - -4. Copy the example DM config to be used as a model for the deployment. In this case, [examples/cloud\_tasks\_queue.yaml](examples/cloud_tasks_queue.yaml) - - ```(shell) - cp templates/cloud_tasks/examples/cloud_tasks_queue.yaml my_cloud_tasks_queue.yaml - ``` - -5. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - - ```(shell) - vim my_cloud_tasks_queue.yaml - ``` - -6. Create your deployment as described below, replacing `` - with your with your own deployment name - - ```(shell) - gcloud beta deployment-manager deployments create \ - --config my_cloud_tasks_queue.yaml - ``` - -7. In case you need to delete your deployment - - ```(shell) - gcloud deployment-manager deployments delete - ``` - -8. To delete the custom `cloudtasks` type-provider - - ```(shell) - cp templates/cloud_tasks/examples/delete_typeprovider.sh . - chmod u+x delete_typeprovider.sh - ./delete_typeprovider.sh - ``` - -## Examples - -- [CloudTasks Queue](examples/cloud_tasks_queue.yaml) -- [CloudTasks Task](examples/cloud_tasks_task.yaml) diff --git a/dm/templates/cloud_tasks/examples/cloud_tasks_queue.yaml b/dm/templates/cloud_tasks/examples/cloud_tasks_queue.yaml deleted file mode 100644 index 43d80d816b9..00000000000 --- a/dm/templates/cloud_tasks/examples/cloud_tasks_queue.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Example of the Cloud Task Queue (AppEngine) template usage. -# -# For details, refer to https://cloud.google.com/tasks/docs/reference/rest/v2beta3/projects.locations.queues. 
- -imports: - - path: templates/cloud_tasks/queue.py - name: queue.py - -resources: - - name: mysupertestqueue - type: queue.py - properties: - name: mysupertestqueue - rateLimits: - maxDispatchesPerSecond: 10 - maxConcurrentDispatches: 5 - retryConfig: - maxAttempts: 2 - maxRetryDuration: 60s - minBackoff: 10s - maxBackoff: 120s - appEngineHttpQueue: - appEngineRoutingOverride: - service: default diff --git a/dm/templates/cloud_tasks/examples/cloud_tasks_task.yaml b/dm/templates/cloud_tasks/examples/cloud_tasks_task.yaml deleted file mode 100644 index 57188c86ad4..00000000000 --- a/dm/templates/cloud_tasks/examples/cloud_tasks_task.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Example of the Cloud Task template usage. -# -# For details, refer to https://cloud.google.com/tasks/docs/reference/rest/v2beta3/projects.locations.queues.tasks. - -imports: - - path: templates/cloud_tasks/task.py - name: task.py - -resources: - - name: mysupertesttask - type: task.py - properties: - queueId: mysupertestqueue - task: - appEngineHttpRequest: - httpMethod: POST - appEngineRouting: - service: default - relativeUri: / - responseView: FULL diff --git a/dm/templates/cloud_tasks/examples/create_typeprovider.sh b/dm/templates/cloud_tasks/examples/create_typeprovider.sh deleted file mode 100644 index 64f0c822846..00000000000 --- a/dm/templates/cloud_tasks/examples/create_typeprovider.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -e -set -o pipefail - -cat <<- EOF > ./options.yaml -options: - inputMappings: - - fieldName: Authorization - location: HEADER - value: > - $.concat("Bearer ", $.googleOauth2AccessToken()) -EOF - -# Create the type-provider. 
-gcloud beta deployment-manager type-providers create cloudtasks \ - --api-options-file=options.yaml \ - --descriptor-url="https://cloudtasks.googleapis.com/\$discovery/rest?version=v2beta3" - -exit 0 diff --git a/dm/templates/cloud_tasks/examples/delete_typeprovider.sh b/dm/templates/cloud_tasks/examples/delete_typeprovider.sh deleted file mode 100644 index 447b8029f21..00000000000 --- a/dm/templates/cloud_tasks/examples/delete_typeprovider.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -e -set -o pipefail - -# Delete the type provider. -gcloud beta deployment-manager type-providers delete cloudtasks -q - -exit 0 diff --git a/dm/templates/cloud_tasks/queue.py b/dm/templates/cloud_tasks/queue.py deleted file mode 100644 index 30799a2b332..00000000000 --- a/dm/templates/cloud_tasks/queue.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Cloud Task queue. """ - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - resources = [] - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('projectId', context.env['project']) - location = properties['location'] - parent = 'projects/{}/locations/{}'.format(project_id, location) - - queue = { - 'name': context.env['name'], - 'type': '{}/cloudtasks:projects.locations.queues'.format(project_id), - 'properties': { - 'name': '{}/queues/{}'.format(parent, name), - 'parent': parent, - 'appEngineHttpQueue': properties['appEngineHttpQueue'] - } - } - - optional_properties = ['rateLimits', 'retryConfig'] - - for prop in optional_properties: - if prop in properties: - queue['properties'][prop] = properties[prop] - - resources.append(queue) - - outputs = [ - { - 'name': 'name', - 'value': '$(ref.{}.name)'.format(context.env['name']) - }, - { - 'name': 'state', - 'value': '$(ref.{}.state)'.format(context.env['name']) - } - ] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/cloud_tasks/queue.py.schema b/dm/templates/cloud_tasks/queue.py.schema deleted file mode 100644 index cfd848c6630..00000000000 --- a/dm/templates/cloud_tasks/queue.py.schema +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Cloud Tasks Queue - author: Sourced Group Inc. - version: 1.0.0 - description: | - Supports creation of a Cloud Tasks Queue resource. 
- - For more information on this resource, see - https://cloud.google.com/tasks/docs/dual-overview - -additionalProperties: false - -required: - - name - -properties: - name: - type: string - description: | - The queue name. Can contain letters ([A-Za-z]), numbers ([0-9]), or - hyphens (-). The maximum length is 100 characters. - maximum: 100 - projectId: - type: string - description: | - The project ID of the project where the queue resides. Can contain - letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods - (.). - location: - type: string - default: us-east1 - description: | - The canonical ID for the queue's location. The list of available - locations can be obtained by calling locations.list. - For more information, see - https://cloud.google.com/about/locations/. - rateLimits: - type: object - additionalProperties: false - description: | - Rate limits for task dispatches. Controls the total rate of dispatches - from a queue (i.e., all traffic dispatched from the queue, - regardless of whether the dispatch is from a first attempt or a retry). - properties: - maxDispatchesPerSecond: - type: number - description: | - The maximum rate at which tasks are dispatched from the queue. If - unspecified when the queue is created, Cloud Tasks picks the - default. For App Engine queues, the maximum allowed value is 500. - This field has the same meaning as rate in queue.yaml/xml. See: - https://cloud.google.com/appengine/docs/standard/python/config/queueref#rate. - maximum: 500 - maxConcurrentDispatches: - type: number - description: | - The maximum number of concurrent tasks that Cloud Tasks allows to be - dispatched from the queue. After the threshold has been reached, - Cloud Tasks stops dispatching tasks until the number of concurrent - requests decreases. If unspecified when the queue is created, Cloud - Tasks picks the default. This field has the same meaning as - max_concurrent_requests in queue.yaml/xml. 
For more info, see - https://cloud.google.com/appengine/docs/standard/python/config/queueref#max_concurrent_requests. - maximum: 5000 - retryConfig: - type: object - additionalProperties: false - description: | - Settings that determine the retry behavior. For tasks created using - Cloud Tasks: the queue-level retry settings apply to all tasks in the - queue that were created using Cloud Tasks. The retry settings cannot be - defined for individual tasks. For tasks created using the App Engine SDK: - the queue-level retry settings apply to all tasks in the queue that do - not have retry settings explicitly set pere task, and were created by - the App Engine SDK. See App Engine documentation: - https://cloud.google.com/appengine/docs/standard/python/taskqueue/push/retrying-tasks. - properties: - maxAttempts: - type: number - description: | - The number of attempts per task. Cloud Tasks attempts the task - maxAttempts times (that is, if the first attempt fails, then there - are maxAttempts - 1 retries). Must be >= -1. - If unspecified when the queue is created, Cloud Tasks picks the - default. -1 indicates unlimited attempts. - This field has the same meaning as task_retry_limit in queue.yaml/xml. - minimum: -1 - maxRetryDuration: - type: string - description: | - If positive, specifies the time limit for retrying a failed task, - measured from when the task was first attempted. Once - maxRetryDuration time has passed and the task has been attempted - maxAttempts times, no further attempts are made, and the task is - deleted. - If zero, the task age is unlimited. - If unspecified when the queue is created, Cloud Tasks picks the - default. maxRetryDuration is truncated to the nearest second. - This field has the same meaning as taskAgeLimit in queue.yaml/xml. - Expressed in seconds with up to nine fractional digits, terminated - by 's'. For example, "3.5s". 
- minBackoff: - type: string - description: | - A task is scheduled for retry between minBackoff and maxBackoff - duration after it fails, if the queue's RetryConfig specifies that - the task should be retried. If unspecified when the queue is created, - Cloud Tasks picks the default. minBackoff is truncated to the - nearest second. This field has the same meaning as - min_backoff_seconds in queue.yaml/xml. - https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters - Expressed in seconds with up to nine fractional digits, terminated - by 's'. For example, "3.5s". - maxBackoff: - type: string - description: | - A task is scheduled for retry between minBackoff and maxBackoff - duration after it fails, if the queue's RetryConfig specifies that - the task should be retried. If unspecified when the queue is created, - Cloud Tasks picks the default. maxBackoff is truncated to the - nearest second. This field has the same meaning as - max_backoff_seconds in queue.yaml/xml. - https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters - Expressed in seconds with up to nine fractional digits, terminated - by 's'. For example, "3.5s". - maxDoublings: - type: number - description: | - The time between retries will double maxDoublings times. A task's - retry interval starts at minBackoff, then doubles maxDoublings - times, then increases linearly, and finally retries retries at - intervals of maxBackoff up to maxAttempts times. - For example, if minBackoff is 10s, maxBackoff is 300s, and - maxDoublings is 3, then the a task will first be retried in 10s. - The retry interval will double three times, and then increase - linearly by 2^3 * 10s. Finally, the task will retry at intervals of - maxBackoff until the task has been attempted maxAttempts times. - Thus, the requests will retry at 10s, 20s, 40s, 80s, 160s, 240s, - 300s, 300s, .... If unspecified when the queue is created, Cloud - Tasks will pick the default. 
- appEngineHttpQueue: - type: object - additionalProperties: false - description: The App Engine HTTP queue. - properties: - appEngineRoutingOverride: - type: object - additionalProperties: false - description: | - Overrides for the task-level appEngineRouting. If set, - appEngineRoutingOverride is used for all tasks in the queue, - regardless of the task-level appEngineRouting settings. - For more info, see: - https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed - https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed - https://cloud.google.com/tasks/docs/reference/rest/v2beta3/projects.locations.queues.tasks#AppEngineHttpRequest.FIELDS.app_engine_routing - properties: - service: - type: string - description: | - The app service. By default, the task is sent to the service that - is the default service when the task is attempted. For some - queues or tasks, which were created using the App Engine Task - Queue API, the host is not parsable into service, version, and - instance. For example, some tasks that were created using the - App Engine SDK use a custom domain name. Custom domains are not - parsed by Cloud Tasks. If the host is not parsable, the service, - version, and instance are an empty string. - version: - type: string - description: | - The app version. By default, the task is sent to the version that - is the default version when the task is attempted. For some - queues or tasks, which were created using the App Engine Task - Queue API, the host is not parsable into service, version, and - instance. For example, some tasks that were created using the - App Engine SDK use a custom domain name. Custom domains are not - parsed by Cloud Tasks. If the host is not parsable, the service, - version, and instance are an empty string. - instance: - type: string - description: | - The app instance. By default, the task is sent to an instance - that is available when the task is attempted. 
Requests can only - be sent to a specific instance if manual scaling is used in App - Engine Standard. App Engine Flex does not support instances. - For more information, see - App Engine Standard request routing: - https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed - and - App Engine Flex request routing: - https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed - -outputs: - name: - type: string - description: The queue name. - state: - type: string - description: The state of the queue. - enum: - - STATE_UNSPECIFIED - - RUNNING - - PAUSED - - DISABLED - -documentation: - - templates/cloud_tasks/README.md - -examples: - - templates/cloud_tasks/examples/cloud_tasks_task.yaml - - templates/cloud_tasks/examples/cloud_tasks_queue.yaml diff --git a/dm/templates/cloud_tasks/task.py b/dm/templates/cloud_tasks/task.py deleted file mode 100644 index cddbe4c6885..00000000000 --- a/dm/templates/cloud_tasks/task.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Cloud Task resource. """ - - -def generate_queue_name(context): - """ Create queue name based on input. """ - - if ('projects/' in context.properties['queueId'] or - '$(ref.' 
in context.properties['queueId']): - # Full queue name or reference - queue_name = context.properties['queueId'] - else: - # Format the queue name - project_id = context.properties.get('projectId', context.env['project']) - queue_name = 'projects/{}/locations/{}/queues/{}'.format( - project_id, - context.properties['location'], - context.properties['queueId'] - ) - - return queue_name - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - resources = [] - properties = context.properties - name = context.env['name'] - project_id = properties.get('projectId', context.env['project']) - parent = generate_queue_name(context) - - task = { - 'name': - context.env['name'], - 'type': - '{}/cloudtasks:projects.locations.queues.tasks'.format(project_id), - 'properties': - { - 'parent': parent, - 'task': - { - 'name': - '{}/tasks/{}'.format(parent, - name), - 'appEngineHttpRequest': - properties['task']['appEngineHttpRequest'] - } - } - } - - optional_properties = ['scheduleTime'] - - for prop in optional_properties: - if prop in properties['task']: - task['properties']['task'][prop] = properties['task'][prop] - - resources.append(task) - - return { - 'resources': resources, - 'outputs': [ - { - 'name':'name', - 'value': '$(ref.{}.name)'.format(context.env['name']) - }, - { - 'name':'createTime', - 'value': '$(ref.{}.createTime)'.format(context.env['name']) - }, - { - 'name':'view', - 'value': '$(ref.{}.view)'.format(context.env['name']) - }, - { - 'name':'scheduleTime', - 'value': '$(ref.{}.scheduleTime)'.format(context.env['name']) - } - ] - } diff --git a/dm/templates/cloud_tasks/task.py.schema b/dm/templates/cloud_tasks/task.py.schema deleted file mode 100644 index 230062d15a7..00000000000 --- a/dm/templates/cloud_tasks/task.py.schema +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Cloud Tasks - author: Sourced Group Inc. - version: 1.0.0 - description: | - Supports creation of a Cloud Task resource. - - For more information on this resource, see - https://cloud.google.com/tasks/docs/dual-overview. - -required: - - task - - queueId - -properties: - queueId: - type: string - description: | - The name of the queue under which the task is created. - This can be one of the following: - - The full name of the queue which follows the format - projects//locations//queues/ or - - Can contain letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). - The maximum length is 100 characters. - projectId: - type: string - description: | - The project ID if the project where the task resides. Can contain - letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or - periods (.). - location: - type: string - default: us-east1 - description: | - The canonical ID for the queue's location. The list of available - locations can be obtained by calling locations.list. - For more information, see - https://cloud.google.com/about/locations/. - If `queueId` is the full name of the queue, this field is ignored. - task: - type: object - required: - - appEngineHttpRequest - description: | - The task to add. Task names have the following format: - projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID. - You can optionally specify a task name. 
If a name is not specified, the - system generates a random unique task ID, which will be set in the task - returned in the response. If scheduleTime is not set, or is in - the past, Cloud Tasks sets it to the current time. - properties: - name: - type: string - description: | - The task name (optional). Can contain letters ([A-Za-z]), numbers - ([0-9]), or hyphens (-). The maximum length is 100 characters. - maximum: 500 - scheduleTime: - type: string - description: | - The time when the task is scheduled to be attempted. For App Engine - queues, this is when the task is to be attempted or retried. - scheduleTime is truncated to the nearest microsecond. - Expressed in the RFC3339 UTC "Zulu" format, accurate to nanoseconds. - For example, "2014-10-02T15:01:23.045123456Z". - appEngineHttpRequest: - type: object - additionalProperties: false - description: | - Defines the HTTP request that is sent to an App Engine app when the - task is dispatched. This can only be used for tasks in a queue with - appEngineHttpQueue set. The task is delivered to the App Engine app - that belongs to the same project as the queue. For information on - how requests are routed and on how routing is affected by dispatch - files, see - https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed - https://cloud.google.com/appengine/docs/python/config/dispatchref - properties: - httpMethod: - type: string - description: The HTTP method used to execute the task. - enum: - - HTTP_METHOD_UNSPECIFIED - - POST - - GET - - HEAD - - PUT - - DELETE - appEngineRouting: - type: object - additionalProperties: false - description: Task-level settings for App Engine routing. - properties: - service: - type: string - description: | - The app service. By default, the task is sent to the service - that is the default service when the task is attempted. 
For - some queues or tasks, which were created using the App Engine - Task Queue API, the host is not parsable into service, - version, and instance. For example, some tasks that were - created using the App Engine SDK use a custom domain name. - Custom domains are not parsed by Cloud Tasks. If the host is - not parsable, the service, version, and instance are an empty - string. - version: - type: string - description: | - The app version. By default, the task is sent to the version - that is the default version when the task is attempted. For - some queues or tasks, which were created using the App Engine - Task Queue API, the host is not parsable into service, - version, and instance. For example, some tasks that were - created using the App Engine SDK use a custom domain name. - Custom domains are not parsed by Cloud Tasks. If the host is - not parsable, the service, version, and instance are an - empty string. - instance: - type: string - description: | - The app instance. By default, the task is sent to an instance - that is available when the task is attempted. Requests can - only be sent to a specific instance if manual scaling is used - in App Engine Standard. App Engine Flex does not support - instances. For more information, see App Engine Standard - request routing and App Engine Flex request routing. - relativeUri: - type: string - description: | - The relative URI. Must begin with "/" and must be a valid HTTP - relative URI. It can contain a path and query string arguments. - If the relative URI is empty, the root path "/" is used. No - spaces are allowed, and the maximum length is 2083 characters. - maximum: 2083 - headers: - type: object - description: | - HTTP Request headers. This map contains the header field names - and values. Headers can be set when the task is created. - Repeated headers are not supported but a header value can contain - commas. 
For more details, see - https://cloud.google.com/tasks/docs/reference/rest/v2beta3/projects.locations.queues.tasks#AppEngineHttpRequest - A list of key-value pairs. For example, { "name": "wrench", - "mass": "1.3kg", "count": "3" }. - body: - type: string - description: | - The HTTP request body. A request body is allowed only if the HTTP - method is POST or PUT. It is an error to set a body on a task - with an incompatible HttpMethod. A base64-encoded string. - -outputs: - name: - type: string - description: The name of the task resource. - createTime: - type: string - description: | - The time when the task was created. createTime is truncated to the - nearest second. - view: - type: string - description: The subset of the Task data. - enum: - - VIEW_UNSPECIFIED - - BASIC - - FULL - scheduleTime: - type: string - description: | - The time when the task is scheduled to be attempted. For App Engine - queues, this is when the task will be attempted or retried. - `schedule_time` will be truncated to the nearest microsecond. - -documentation: - - templates/cloud_tasks/README.md - -examples: - - templates/cloud_tasks/examples/cloud_tasks_task.yaml - - templates/cloud_tasks/examples/cloud_tasks_queues.yaml diff --git a/dm/templates/cloud_tasks/tests/integration/cloud_tasks.bats b/dm/templates/cloud_tasks/tests/integration/cloud_tasks.bats deleted file mode 100644 index eebed014046..00000000000 --- a/dm/templates/cloud_tasks/tests/integration/cloud_tasks.bats +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. 
-# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables. - export QUEUE_NAME="test-q-${RAND}" - export TASK_NAME="test-task-${RAND}" - export DISPATCHES_PER_SECOND="10.0" - export CONCURRENT_DISPATCHES="5" - export MAX_ATTEMPTS="100" - export MAX_RETRY_DURATION="60s" - export MAX_BACKOFF="3600s" - export MIN_BACKOFF="0.100s" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. 
-} - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud beta deployment-manager deployments create "${DEPLOYMENT_NAME}"\ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -sleep 2 - -@test "Verify if queue ${QUEUE_NAME} was created " { - run gcloud beta tasks queues list --format="value(name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${QUEUE_NAME}" ]] -} - -@test "Verify if queue ${QUEUE_NAME} is RUNNING" { - run gcloud beta tasks queues describe ${QUEUE_NAME} \ - --format="value(state)" - [[ "$status" -eq 0 ]] - [[ "$output" -eq "RUNNING" ]] -} - -@test "Verify if maxDispatchesPerSecond is ${DISPATCHES_PER_SECOND}" { - run gcloud beta tasks queues describe ${QUEUE_NAME} \ - --format="value(rateLimits.maxDispatchesPerSecond)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${DISPATCHES_PER_SECOND}" ]] -} - -@test "Verify if maxConcurrentDispatches is ${CONCURRENT_DISPATCHES}" { - run gcloud beta tasks queues describe ${QUEUE_NAME} \ - --format="value(rateLimits.maxConcurrentDispatches)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CONCURRENT_DISPATCHES}" ]] -} - -@test "Verify if retryConfig maxAttempts is set to ${MAX_ATTEMPTS}" { - run gcloud beta tasks queues describe ${QUEUE_NAME} \ - --format="value(retryConfig.maxAttempts)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${MAX_ATTEMPTS}" ]] -} - -@test "Verify if retryConfig maxBackoff is set to ${MAX_BACKOFF}" { - run gcloud beta tasks queues describe ${QUEUE_NAME} \ - --format="value(retryConfig.maxBackoff)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${MAX_BACKOFF}" ]] -} - -@test "Verify if retryConfig minBackoff is set to ${MIN_BACKOFF}" { - run gcloud beta tasks queues describe ${QUEUE_NAME} \ - --format="value(retryConfig.minBackoff)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${MIN_BACKOFF}" ]] -} - -@test "Verify if the task ${TASK_NAME} was created " { - run gcloud beta tasks describe ${TASK_NAME} \ - 
--queue ${QUEUE_NAME} - [[ "$status" -eq 0 ]] - [[ "$output" =~ "createTime:" ]] - [[ "$output" =~ "${TASK_NAME}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - run gcloud beta deployment-manager deployments delete "${DEPLOYMENT_NAME}"\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/cloud_tasks/tests/integration/cloud_tasks.yaml b/dm/templates/cloud_tasks/tests/integration/cloud_tasks.yaml deleted file mode 100644 index cd24765436a..00000000000 --- a/dm/templates/cloud_tasks/tests/integration/cloud_tasks.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# Test of Cloud Tasks template. - -imports: - - path: templates/cloud_tasks/queue.py - name: queue.py - - path: templates/cloud_tasks/task.py - name: task.py - -resources: - - name: ${QUEUE_NAME} - type: queue.py - properties: - name: ${QUEUE_NAME} - rateLimits: - maxDispatchesPerSecond: ${DISPATCHES_PER_SECOND} - maxConcurrentDispatches: ${CONCURRENT_DISPATCHES} - retryConfig: - maxAttempts: ${MAX_ATTEMPTS} - maxRetryDuration: ${MAX_RETRY_DURATION} - minBackoff: ${MIN_BACKOFF} - maxBackoff: ${MAX_BACKOFF} - appEngineHttpQueue: - appEngineRoutingOverride: - service: "default" - - - name: ${TASK_NAME} - type: task.py - properties: - queueId: $(ref.${QUEUE_NAME}.name) - task: - appEngineHttpRequest: - httpMethod: POST - appEngineRouting: - service: default - relativeUri: / - responseView: FULL diff --git a/dm/templates/cloudbuild/README.md b/dm/templates/cloudbuild/README.md deleted file mode 100644 index 3b0b2d2b42a..00000000000 --- a/dm/templates/cloudbuild/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Cloud Build - -This template creates a Google Cloud Build. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Enable the following in the [APIs & Services](https://console.cloud.google.com/apis/dashboard) section of the Google Cloud console: - - [Cloud Build API](https://console.cloud.google.com/apis/library/cloudbuild.googleapis.com) - - [Cloud Source Repositories API](https://console.cloud.google.com/apis/library/sourcerepo.googleapis.com) - - [Container Registry API](https://console.cloud.google.com/apis/library/containerregistry.googleapis.com) -- Grant to the Cloud Build service account the IAM roles necessary for the steps in your build - -## Deployment - -### Resources - -- [projects.builds](https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds) -- [cloud builders](https://cloud.google.com/cloud-build/docs/cloud-builders) -- [cloud builders community](https://github.com/GoogleCloudPlatform/cloud-builders-community) - -### Properties - -See the `properties` section in the schema file(s): - -- [CloudBuild build schema](cloudbuild.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - - ```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - ``` - -2. Go to the [dm](../../) directory - - ```shell - cd dm - ``` - -3. Copy the example DM config to be used as a model for the deployment, in this case [examples/cloudbuild.yaml](examples/cloudbuild.yaml) - - ```shell - cp templates/cloudbuild/examples/cloudbuild.yaml my_cloudbuild.yaml - ``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - - ```shell - vim my_cloudbuild.yaml # <== change values to match your GCP setup - ``` - -5. 
Create your deployment as described below, replacing `` - with your with your own deployment name - - ```shell - gcloud deployment-manager deployments create \ - --config my_cloudbuild.yaml - ``` - -6. In case you need to delete your deployment: - - ```shell - gcloud deployment-manager deployments delete - ``` - -## Examples - -- [Cloud Build](examples/cloudbuild.yaml) -- [Cloud Build with StorageSource](examples/cloudbuild_storagesource.yaml) -- [Cloud Build with RepoSource](examples/cloudbuild_reposource.yaml) diff --git a/dm/templates/cloudbuild/cloudbuild.py b/dm/templates/cloudbuild/cloudbuild.py deleted file mode 100644 index e7d79e4a75d..00000000000 --- a/dm/templates/cloudbuild/cloudbuild.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Cloud Build resource. """ - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - resources = [] - outputs = [] - properties = context.properties - project_id = properties.get('project', context.env['project']) - name = context.env['name'] - build_steps = properties['steps'] - cloud_build = { - 'name': name, - # https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds/create - 'type': 'gcp-types/cloudbuild-v1:cloudbuild.projects.builds.create', - 'properties': { - 'projectId': project_id, - 'steps': build_steps - } - } - - optional_properties = [ - 'source', - 'timeout', - 'images', - 'artifacts', - 'logsBucket', - 'options', - 'substitutions', - 'tags', - 'secrets' - ] - - for prop in optional_properties: - if prop in properties: - cloud_build['properties'][prop] = properties[prop] - - resources.append(cloud_build) - - # Output variables - output_props = [ - 'id', - 'status', - 'results', - 'createTime', - 'startTime', - 'finishTime', - 'logUrl', - 'sourceProvenance' - ] - - for outprop in output_props: - output_obj = {} - output_obj['name'] = outprop - output_obj['value'] = '$(ref.{}.{})'.format(name, outprop) - outputs.append(output_obj) - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/cloudbuild/cloudbuild.py.schema b/dm/templates/cloudbuild/cloudbuild.py.schema deleted file mode 100644 index cfbe5cad269..00000000000 --- a/dm/templates/cloudbuild/cloudbuild.py.schema +++ /dev/null @@ -1,480 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Cloud Build - author: Sourced Group Inc. - version: 1.1.0 - description: | - Creates a Cloud Build resource. - - For more information on this resource, see - https://cloud.google.com/cloud-build/docs/ - - APIs endpoints used by this template: - - gcp-types/cloudbuild-v1:cloudbuild.projects.builds.create => - https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds/create - -additionalProperties: false - -required: - - steps - -properties: - project: - type: string - description: | - The project ID of the project containing resources. The - Google apps domain is prefixed if applicable. - source: - description: The location of the source files to build. - additionalProperties: false - oneOf: - - required: - - storageSource - - required: - - repoSource - properties: - storageSource: - $ref: "#/definitions/storageSource" - repoSource: - $ref: "#/definitions/repoSource" - steps: - type: array - uniqItems: true - description: The list of steps in the build pipeline. - items: - type: object - additionalProperties: false - description: A step in the build pipeline. - required: - - name - properties: - name: - type: string - description: | - The name of the container image that will run this particular build step. - - If the image is available in the host's Docker daemon's cache, it will be run directly. If not, the host - will attempt to pull the image first, using the builder service account's credentials if necessary. - - The Docker daemon's cache will already have the latest versions of all of the officially supported - build steps (https://github.com/GoogleCloudPlatform/cloud-builders). The Docker daemon will also have - cached many of the layers for some popular images, like "ubuntu", "debian", but they will be refreshed - at the time you attempt to use them. 
- - If you built an image in a previous build step, it will be stored in the host's Docker daemon's cache - and is available to use as the name for a later build step. - env: - type: array - uniqItems: true - description: | - The list of environment variable definitions to be used when - running a step. The elements are in the "KEY=VALUE" form, - wherein the environment variable "KEY" is given the value "VALUE". - items: - type: string - args: - type: array - description: | - The list of build arguments to be passed to the step when - it is started. If the image used to run the step's container has an - entry point, the array elements are used as arguments to that entry - point. If the image does not define an entry point, the first - element in the array is used as the entry point, and the remainder - are used as arguments. - items: - type: string - dir: - type: string - description: | - The working directory to use when running the step's container. - If the value is a relative path, it is relative to the build's - working directory. If this value is an absolute path, it may be - outside the build's working directory. In this case, the contents - of the path may not be persisted across the build step executions, - unless a volume for that path is specified. - If the build specifies a RepoSource with dir and a step with a dir, - which specifies an absolute path, the RepoSource dir is ignored - for the step's execution. - id: - type: string - description: | - The unique identifier for the build step. Used in waitFor to - reference this build step as a dependency. - waitFor: - type: array - uniqItems: true - description: | - The ID(s) of the step(s) that the build step depends on. - This build step will not start until all the build steps in waitFor - have completed successfully. If waitFor is empty, this build step - will start when all the previous build steps in the Build.Steps - list have completed successfully. 
- items: - type: string - entrypoint: - type: string - description: | - The entry point to be used instead of the build step image's - default entry point. If not set, the image's default entry - point is used. - secretEnv: - type: array - uniqItems: true - description: | - The list of environment variables which are encrypted using the - Cloud Key Management Service crypto key. These values must be - specified in the build's Secret. - items: - type: string - volumes: - type: array - uniqItems: true - description: The list of volumes to mount into the build step. - items: - type: object - additionalProperties: false - properties: - name: - type: string - description: | - The name of the volume to mount. Volume names must be unique - per build step, and must be valid names for Docker volumes. - Each named volume must be used by at least two build steps. - path: - type: string - description: | - The path to mount the volume at. Paths must be absolute. - They cannot conflict with other volume paths on the same - build step or with certain reserved volume paths. - timeout: - type: string - description: | - The time limit for executing the build step. If not defined, the - step has no time limit, and is allowed to continue to run - either until it completes or until the build itself times out. - Defined in seconds with up to nine fractional digits, - terminated by 's'; for example 3.5s. - timeout: - type: string - description: | - The time limit for executing the build step. If not defined, the - step has no time limit, and is allowed to continue to run - either until it completes or until the build itself times out. - Defined in seconds with up to nine fractional digits, - terminated by 's'; for example 3.5s. - images: - type: array - uniqItems: true - description: | - The list of images to be pushed upon successful completion of all build - steps. The images are pushed using the Builder service account's - credentials. 
The digests of the pushed images are stored in the Build - resource's results field. If any of the images fail to be pushed, the - build status is marked as FAILURE. - items: - type: string - artifacts: - type: object - additionalProperties: false - description: | - Artifacts produced by the build, to be uploaded upon successful - completion of all build steps. - properties: - images: - type: array - uniqItems: true - description: | - The list of images to be pushed upon successful completion of all - build steps. The images are pushed using the Builder serviceaccount's - credentials. The digests of the pushed images are stored in the Build - resource's results field. If any of the images fail to be pushed, the - build status is marked as FAILURE. - items: - type: string - objects: - type: object - additionalProperties: false - description: | - The list of objects to be uploaded to Cloud Storage upon successful - completion of all build steps. Files in the workspace matching - the specified paths globs are uploaded using the Builder - serviceaccount's credentials. - properties: - location: - type: string - description: | - The Cloud Storage bucket, with an optional object path, in - the "gs://bucket/path/to/somewhere/" form. Files in the - workspace matching any pattern specified uder that path are - uploaded to Cloud Storage with this location as a prefix. - paths: - type: array - uniqItems: true - description: | - Path globs used to match files in the build's workspace. - items: - type: string - logsBucket: - type: string - description: | - The Google Cloud Storage bucket where logs are written. - Log file names are in the ${logsBucket}/log-${build_id}.txt format. - options: - type: object - additionalProperties: false - description: Special options for the build. - properties: - sourceProvenanceHash: - type: array - items: - type: string - enum: - - NONE - - SHA256 - - MD5 - requestedVerifyOption: - type: string - description: The requested verifiability option. 
- enum: - - NOT_VERIFIED - - VERIFIED - machineType: - type: string - description: | - The type of the Compute Engine machine to run the build on. - enum: - - UNSPECIFIED - - N1_HIGHCPU_8 - - N1_HIGHCPU_32 - diskSizeGb: - type: string - description: | - The requested disk size for the VM that runs the build. This is - NOT "disk free"; some of the space is used by the operating - system and build utilities. - substitutionOption: - type: string - description: | - The option to specify behavior when there is an error in the - substitution checks. - enum: - - MUST_MATCH - - ALLOW_LOOSE - logStreamingOption: - type: string - description: | - The option to define the build's behavior when streaming logs to - Google Cloud Storage. - enum: - - STREAM_DEFAULT - - STREAM_ON - - STREAM_OFF - workerPool: - type: string - description: | - Option to specify a WorkerPool for the build. User specifies the pool with the format - "[WORKERPOOL_PROJECT_ID]/[WORKERPOOL_NAME]". This is an experimental field. - logging: - type: string - description: | - The option to specify the logging mode, which determines where the - logs are stored. - enum: - - LOGGING_UNSPECIFIED - - LEGACY - - GCS_ONLY - point is used. - env: - type: array - uniqItems: true - description: | - A list of global environment variable definitions that will exist for all build steps in this build. - If a variable is defined in both globally and in a build step, the variable will use the build step value. - - The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE". - items: - type: string - secretEnv: - type: array - uniqItems: true - description: | - A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. - These values must be specified in the build's Secret. These variables will be available to - all build steps in this build. 
- items: - type: string - volumes: - type: array - uniqItems: true - description: | - Global list of volumes to mount for ALL build steps - - Each volume is created as an empty volume prior to starting the build process. Upon completion of the build, - volumes and their contents are discarded. Global volume names and paths cannot conflict with the volumes - defined a build step. - - Using a global volume in a build with only one step is not valid as it is indicative of a build request - with an incorrect configuration. - items: - type: object - additionalProperties: false - properties: - name: - type: string - description: | - The name of the volume to mount. Volume names must be unique - per build step, and must be valid names for Docker volumes. - Each named volume must be used by at least two build steps. - path: - type: string - description: | - The path to mount the volume at. Paths must be absolute. - They cannot conflict with other volume paths on the same - build step or with certain reserved volume paths. - substitutions: - type: object - description: | - Substitution data for the Build resource in the key-value format. - Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - tags: - type: array - uniqItems: true - description: | - Tags for annotation of a Build. These are not docker tags. - items: - type: string - secrets: - type: object - additionalProperties: false - description: Secrets to decrypt using Cloud Key Management Service. - properties: - secret: - type: object - additionalProperties: false - properties: - kmsKeyName: - type: string - description: | - The Cloud KMS key name to use for decrypting the environment - variables. - secretEnv: - type: object - description: | - The map of the environment variable name to its encrypted value. - -definitions: - storageSource: - type: object - description: | - The location of the source in an archive file on Google Cloud Storage. 
- required: - - bucket - - object - properties: - bucket: - type: string - description: The Google Cloud Storage bucket containing the source. - object: - type: string - description: | - The Google Cloud Storage object containing the source. Must be a - gzipped archive file (*.tar.gz). - to build. - generation: - type: string - description: | - The Google Cloud Storage generation for the object. If omitted, the - latest generation is used. - repoSource: - type: object - description: | - The location of the source in the Google Cloud Source repository. - oneOf: - - required: - - branchName - - required: - - tagName - - required: - - commitSha - properties: - projectId: - type: string - description: | - The ID of the project that owns the Cloud Source repository. - repoName: - type: string - description: | - The name of the Google Cloud Source repository. If omitted, the - name "default" is assumed. - dir: - type: string - description: | - The directory to run the build in (relative to the source root). - If a step's dir is specified as an absolute path, the value is - ignored for that step's execution. - branchName: - $ref: "#/definitions/branchName" - tagName: - $ref: "#/definitions/tagName" - commitSha: - $ref: "#/definitions/commitSha" - branchName: - type: string - description: The name of the branch to build. - tagName: - type: string - description: The name of the tag to build. - commitSha: - type: string - description: The explicit commit SHA to build. - -outputs: - id: - type: string - description: Build Unique identifier. - status: - type: string - description: The build status. - results: - type: object - description: The build results. - createTime: - type: string - description: | - The time the build was requested, in the RFC3339 UTC "Zulu" format. - startTime: - type: string - description: | - The time the build execution started, in the RFC3339 UTC "Zulu" format. 
- finishTime: - type: string - description: | - The time the build finished, in the RFC3339 UTC "Zulu" format. - logUrl: - type: string - description: The URL to the build logs in the Google Cloud console. - sourceProvenance: - type: object - description: | - Fixed identifier to used to describe the original source, or verify that some source was used for this build. - -documentation: - - templates/cloudbuild/README.md - -examples: - - templates/cloudbuild/examples/cloudbuild.yaml - - templates/cloudbuild/examples/cloudbuild_storagesource.yaml - - templates/cloudbuild/examples/cloudbuild_reposource.yaml diff --git a/dm/templates/cloudbuild/examples/cloudbuild.yaml b/dm/templates/cloudbuild/examples/cloudbuild.yaml deleted file mode 100644 index 66f4497ffc7..00000000000 --- a/dm/templates/cloudbuild/examples/cloudbuild.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Example of the Cloud Build template usage. -# -# In this example, a sample NodeJS "Hello World" app container is built and pushed to -# the container registry. -# -# The Build steps are as follows: -# Step1: Clone the cloud-builders GitHub repository -# Step2: Build the Docker image from Dockerfile in the npm/examples/hello_world dir -# Step3: Tag the image as gcr.io/$PROJECT_ID/${_IMAGE_NAME} -# Step4: Push the image to the container registry gcr.io/$PROJECT_ID/${_IMAGE_NAME} - -imports: - - path: templates/cloudbuild/cloudbuild.py - name: build.py - -resources: - - name: mycloudbuild - type: build.py - properties: - steps: - # Step 1 - - name: 'gcr.io/cloud-builders/git' - args: - - clone - - https://github.com/GoogleCloudPlatform/cloud-builders - # Step 2 - - name: 'gcr.io/cloud-builders/docker' - args: - - build - # Step 3 - - '--tag=gcr.io/$PROJECT_ID/${_IMAGE_NAME}' - - '.' 
- dir: cloud-builders/npm/examples/hello_world - # Step 4 - - name: 'gcr.io/cloud-builders/docker' - args: - - push - - 'gcr.io/$PROJECT_ID/${_IMAGE_NAME}' - substitutions: - _IMAGE_NAME: npm-helloworld - images: - - 'gcr.io/$PROJECT_ID/${_IMAGE_NAME}' - timeout: '200s' diff --git a/dm/templates/cloudbuild/examples/cloudbuild_reposource.yaml b/dm/templates/cloudbuild/examples/cloudbuild_reposource.yaml deleted file mode 100644 index da00094dae5..00000000000 --- a/dm/templates/cloudbuild/examples/cloudbuild_reposource.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Example of the Cloud Build template usage. -# -# In this example, a Build is launched for the following Github repo. -# https://github.com/GoogleCloudBuild/gcbapp-example -# -# Pre-requisites steps for this build example: -# Step1: Login to github and fork the following github repo -# https://github.com/GoogleCloudBuild/gcbapp-example -# Step2: Create a Cloud Source repo, name your repo -# After the repo is created successfully do the following -# - Select "Automatically mirror from Github/Bitbucket" option -# - Select "Hosting Service", Choose "Github" from dropdown then Connect -# - Follow steps to Authenticate your Github repo -# - Once authenticated, select the repo (from Step1) from the list -# - Select, "I consent .." checkbox then Click "connect" -# Step3: Replace with the repo name created in Step2 -# - -imports: - - path: templates/cloudbuild/cloudbuild.py - name: build.py - -resources: - - name: mycloudbuild - type: build.py - properties: - source: - repoSource: - repoName: '' - branchName: 'master' - steps: - - name: 'gcr.io/cloud-builders/docker' - args: - - 'build' - - '--tag=gcr.io/$PROJECT_ID/${_IMAGE_NAME}:latest' - - '.' 
- - name: 'gcr.io/cloud-builders/docker' - args: - - 'push' - - 'gcr.io/$PROJECT_ID/${_IMAGE_NAME}' - substitutions: - _IMAGE_NAME: '' - images: - - 'gcr.io/$PROJECT_ID/${_IMAGE_NAME}' - timeout: '600s' diff --git a/dm/templates/cloudbuild/examples/cloudbuild_storagesource.yaml b/dm/templates/cloudbuild/examples/cloudbuild_storagesource.yaml deleted file mode 100644 index 20c59c265a0..00000000000 --- a/dm/templates/cloudbuild/examples/cloudbuild_storagesource.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Example of the Cloud Build template usage. -# -# In this example, a Cloud Build is triggered with source from a storage bucket. -# -# Replace the following placeholders with valid values: -# : a valid storage bucket name (name of an existing -# storage bucket) where the source resides; -# e.g.: myapp-source-bucket -# NOTE: do not include neither "gs://" nor trailing slash -# : a path to the source archive from the root of the bucket; -# e.g. docker/mydockerfile.tar.gz -# NOTE: source files must be uploaded as a gzip compressed -# tar archive -# : a cloud-builder supported on Google Cloud; -# see details at -# https://cloud.google.com/cloud-build/docs/cloud-builders -# : a list of build arguments supported by the builder; -# enter each argument on a separate line as show below -# Example build step: -# - name: 'gcr.io/cloud-builders/git' -# args: -# - clone -# - https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - -imports: - - path: templates/cloudbuild/cloudbuild.py - name: build.py - -resources: - - name: mycloudbuild - type: build.py - properties: - source: - storageSource: - bucket: - object: - steps: - - name: 'gcr.io/cloud-builders/' - args: - - - - - - name: 'gcr.io/cloud-builders/' - args: - - - - - timeout: '200s' diff --git a/dm/templates/cloudbuild/tests/integration/cloudbuild.bats b/dm/templates/cloudbuild/tests/integration/cloudbuild.bats deleted file mode 100644 index e81796255f2..00000000000 --- 
a/dm/templates/cloudbuild/tests/integration/cloudbuild.bats +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables - export CLOUDBUILD_NAME="test-build-${RAND}" - export BUILD_TIMEOUT="500s" - export IMAGE_NAME="test-npm-helloworld-${RAND}" - export IMAGE_TAG="gcr.io/${CLOUD_FOUNDATION_PROJECT_ID}/${IMAGE_NAME}" - export LOGURL_BASE="https://console.cloud.google.com/gcr/builds/" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - # delete the container image from registry - gcloud container images delete ${IMAGE_TAG}:latest -q - fi - - # Per-test teardown steps. 
-} - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "$output" - [[ "$status" -eq 0 ]] -} - -# Get the BuildID after DM deployment and store it in a variable for reuse -export ID=$(gcloud builds list --format="value(id)" --filter="(${IMAGE_NAME})") - -@test "Verify if build ${CLOUDBUILD_NAME} was created " { - run gcloud builds list --format="value(id,images)" \ - --filter="(${IMAGE_NAME})" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${IMAGE_NAME}" ]] -} - -@test "Verify if build ${CLOUDBUILD_NAME} status is a SUCCESS" { - run gcloud builds describe $ID --format="value(status)" - [[ "$status" -eq 0 ]] - [[ "$output" -eq "SUCCESS" ]] -} - -@test "Verify if build timeout is set to ${BUILD_TIMEOUT}" { - run gcloud builds describe $ID --format="value(timeout)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${BUILD_TIMEOUT}" ]] -} - -@test "Verify if cloud-builder in STEP 1 is git" { - run gcloud builds describe $ID --format="value(steps[0].name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "git" ]] -} - -@test "Verify if first build arg in STEP 1 is clone" { - run gcloud builds describe $ID --format="value(steps[0].args[0])" - [[ "$status" -eq 0 ]] - [[ "$output" -eq "clone" ]] -} - -@test "Verify if cloud-builder in STEP 2 is docker" { - run gcloud builds describe $ID --format="value(steps[1].name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "docker" ]] -} - -@test "Verify if first build arg in STEP 2 is build" { - run gcloud builds describe $ID --format="value(steps[1].args[0])" - [[ "$status" -eq 0 ]] - [[ "$output" -eq "build" ]] -} - -@test "Verify if relative dir in STEP 2 is npm/examples/hello_world" { - run gcloud builds describe $ID --format="value(steps[1].dir)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "cloud-builders/npm/examples/hello_world" ]] -} - -@test "Verify if 
image stored in container repo is ${IMAGE_TAG} " { - run gcloud builds describe $ID --format="value(images)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${IMAGE_TAG}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/cloudbuild/tests/integration/cloudbuild.yaml b/dm/templates/cloudbuild/tests/integration/cloudbuild.yaml deleted file mode 100644 index 619aea9fdbf..00000000000 --- a/dm/templates/cloudbuild/tests/integration/cloudbuild.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Test of the Cloud Build template. - -imports: - - path: templates/cloudbuild/cloudbuild.py - name: build.py - -resources: - - name: ${CLOUDBUILD_NAME} - type: build.py - properties: - steps: - - name: 'gcr.io/cloud-builders/git' - args: ['clone', 'https://github.com/GoogleCloudPlatform/cloud-builders'] - - name: 'gcr.io/cloud-builders/docker' - args: [ 'build', '-t', 'gcr.io/${CLOUD_FOUNDATION_PROJECT_ID}/${IMAGE_NAME}', '.' ] - dir: cloud-builders/npm/examples/hello_world - - name: 'gcr.io/cloud-builders/docker' - args: [ 'push', 'gcr.io/${CLOUD_FOUNDATION_PROJECT_ID}/${IMAGE_NAME}'] - images: - - 'gcr.io/${CLOUD_FOUNDATION_PROJECT_ID}/${IMAGE_NAME}' - timeout: '${BUILD_TIMEOUT}' diff --git a/dm/templates/dataproc/README.md b/dm/templates/dataproc/README.md deleted file mode 100644 index e381046e878..00000000000 --- a/dm/templates/dataproc/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# Dataproc - -This template creates a Dataproc cluster. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) - -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) - -- Enable the [Cloud Dataproc API](https://cloud.google.com/dataproc/docs/reference/rest/) - -- Grant the [Dataproc Worker](https://cloud.google.com/dataproc/docs/concepts/iam/iam) - IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [compute.v1.instance](https://cloud.google.com/compute/docs/reference/rest/v1/instances) -- [compute.v1.instanceTemplate](https://cloud.google.com/compute/docs/reference/latest/instanceTemplates) -- [compute.v1.instanceGroup](https://cloud.google.com/compute/docs/reference/latest/instanceGroups) -- [dataproc.v1.cluster](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters) - -### Properties - -See the `properties` section in the schema file(s): - -- [Dataproc](dataproc.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in - this case, [examples/dataproc.yaml](examples/dataproc.yaml): - -```shell - cp templates/dataproc/examples/dataproc.yaml my_dataproc.yaml -``` - -4. Change the values in the config file to match your specific GCP setup - (for properties, refer to the schema files listed above): - -```shell - vim my_dataproc.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace \ with the - relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_dataproc.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Dataproc](examples/dataproc.yaml) diff --git a/dm/templates/dataproc/dataproc.py b/dm/templates/dataproc/dataproc.py deleted file mode 100644 index a3295d2517b..00000000000 --- a/dm/templates/dataproc/dataproc.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Dataproc cluster. """ - -import six - -NODES_SCHEMA = { - 'numInstances': None, - 'isPreemptible': None, - 'machineType': 'machineTypeUri', - 'accelerators': None, -} - -def get_disk_config(properties): - """ If any disk property is specified, creates the diskConfig section. """ - - disk_schema = { - 'diskType': 'bootDiskType', - 'diskSizeGb': 'bootDiskSizeGb', - 'numLocalSsds': None - } - - return read_configuration(properties, disk_schema) - - -def read_configuration(properties, schema): - """ Creates a new config section by reading and renaming properties from - the source section. - """ - - if any(name in properties for name in schema): - config = {} - for name, rename_to in six.iteritems(schema): - add_optional_property(config, properties, name, rename_to) - return config - - return None - - -def get_instance_group_config(properties, image, cluster_schema): - """ Creates a cluster instance group. 
""" - - config = read_configuration(properties, cluster_schema) - - disk_config = get_disk_config(properties) - if disk_config: - config['diskConfig'] = disk_config - - if image: - config['imageUri'] = image - - return config - - -def add_optional_property(destination, source, property_name, rename_to=None): - """ Copies each property defined in the source object to the destination - object. - """ - - rename_to = rename_to or property_name - if property_name in source: - destination[rename_to] = source[property_name] - - -def get_gce_cluster_config(properties): - """ Creates the configuration section for a cluster. """ - - gce_schema = { - 'zone': 'zoneUri', - 'network': 'networkUri', - 'subnetwork': 'subnetworkUri', - 'serviceAccountEmail': 'serviceAccount', - 'serviceAccountScopes': None, - 'internalIpOnly': None, - 'networkTags': 'tags', - 'metadata': None - } - - if 'network' in properties and 'subnetwork' in properties: - msg = 'Specifying both "network" and "subnetwork" is not allowed.' - raise ValueError(msg) - - return read_configuration(properties, gce_schema) - - -def set_instance_group_config(properties, cluster, image, instance_group): - """ Assign instance group config to the cluster. """ - - group_spec = properties.get(instance_group) - group_schema = NODES_SCHEMA - group_config = get_instance_group_config(group_spec, image, group_schema) - config_name = instance_group + 'Config' - cluster['properties']['config'][config_name] = group_config - config_output_path = 'ref.{}.config.{}'.format(cluster['name'], config_name) - - return { - 'name': '{}InstanceNames'.format(instance_group), - 'value': '$({}.instanceNames)'.format(config_output_path) - } - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - image = context.properties.get('image') - region = properties['region'] - - cluster_config = get_gce_cluster_config(properties) - - cluster = { - 'name': context.env['name'], - # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters - 'type': 'gcp-types/dataproc-v1:projects.regions.clusters', - 'properties': - { - 'clusterName': name, - 'projectId': project_id, - 'region': region, - 'config': { - 'gceClusterConfig': cluster_config, - } - } - } - - for prop in ['configBucket', 'softwareConfig', 'initializationActions', 'encryptionConfig']: - add_optional_property(cluster['properties']['config'], properties, prop) - add_optional_property(cluster['properties'], properties, 'labels') - - outputs = [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'configBucket', - 'value': '$(ref.{}.config.configBucket)'.format(context.env['name']) - } - ] - - for instance_group in ['master', 'worker', 'secondaryWorker']: - if instance_group in properties: - instance_group_output = set_instance_group_config( - properties, - cluster, - image, - instance_group - ) - outputs.append(instance_group_output) - - return {'resources': [cluster], 'outputs': outputs} diff --git a/dm/templates/dataproc/dataproc.py.schema b/dm/templates/dataproc/dataproc.py.schema deleted file mode 100644 index 60205820ada..00000000000 --- a/dm/templates/dataproc/dataproc.py.schema +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Dataproc - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates a Dataproc cluster. - - For more information on this resource: - https://cloud.google.com/compute/ - - APIs endpoints used by this template: - - gcp-types/dataproc-v1:projects.regions.clusters => - https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters - -additionalProperties: false - -definitions: - nodeConfig: - properties: - numInstances: - type: integer - description: The number of VM instances in the instance group. - isPreemptible: - type: boolean - description: | - If True, specifies that the instance group consists of preemptible - instances. - imageUri: - type: string - description: | - The Compute Engine image resource used for cluster instances. - It can be specified or may be inferred from SoftwareConfig.image_version. - machineType: - type: string - description: | - The Compute Engine machine type used for the cluster instances. - A full URL, partial URI, or short name are valid. Examples: - - https://www.googleapis.com/compute/v1/projects/[projectId]/zones/us-east1-a/machineTypes/n1-standard-2 - - projects/[projectId]/zones/us-east1-a/machineTypes/n1-standard-2 - - n1-standard-2 - diskType: - type: string - default: pd-standard - description: The boot disk type. - enum: - - pd-standard - - pd-ssd - diskSizeGb: - type: integer - default: 500 - description: The boot disk size in GB. - numLocalSsds: - type: integer - default: 0 - description: The number of attached SSDs. 
- minimum: 0 - maximum: 4 - accelerators: - type: array - uniqItems: true - description: | - The Compute Engine accelerator configuration for these instances. - - Beta Feature: This feature is still under development. It may be changed before final release - items: - type: object - additionalProperties: false - properties: - acceleratorTypeUri: - type: string - description: | - Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. - See Compute Engine AcceleratorTypes. - - Examples: - - https://www.googleapis.com/compute/beta/projects/[projectId]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 - projects/[projectId]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 - nvidia-tesla-k80 - Auto Zone Exception: If you are using the Cloud Dataproc Auto Zone Placement feature, - you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80. - acceleratorCount: - type: number - description: | - The number of the accelerator cards of this type exposed to this instance. - -properties: - name: - type: string - description: | - The cluster name. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the service. - labels: - type: object - description: | - Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, - and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, - and must conform to RFC 1035. No more than 32 labels can be associated with a cluster. - - An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - region: - type: string - default: global - description: | - The region where the Compute Engine cluster is located. When deployed - in the 'global' region, or with no region specified, the 'zone' parameter - must be provided. 
- zone: - type: string - description: | - The zone where the Compute Engine cluster is located. A full URL, partial - URI, or short name are valid. Examples: - - https://www.googleapis.com/compute/v1/projects/[projectId]/zones/[zone] - - projects/[projectId]/zones/[zone] - - us-central1-f - Mandatory if the region parameter value is 'global', or is not defined. - image: - type: string - description: | - The Compute Engine image resource used for cluster instances. Can be - either specified or inferred from 'softwareConfig.imageVersion'. - serviceAccountEmail: - type: string - description: | - The service account of the instances. Defaults to the default Compute - Engine service account. - serviceAccountScopes: - type: array - uniqItems: true - description: | - A list of URIs of service account scopes to be included in the Compute - Engine instances. - items: - type: string - description: | - The URI of service account scope to be included in the Compute Engine - instances. - internalIpOnly: - type: boolean - description: | - If True, all instances in the cluster have only internal IP addresses. - network: - type: string - description: | - The Compute Engine network to be used for machine communications. Cannot - be specified if the 'subnetwork' value is provided. - subnetwork: - type: string - description: | - The Compute Engine subnetwork to be used for machine communications. - Cannot be specified if the 'network' value is provided. - networkTags: - type: array - description: A list of Compute Engine tags to add to all instances. - items: - type: string - metadata: - type: object - description: | - The Compute Engine metadata key-value entries to add to all instances. - configBucket: - type: string - description: | - The Cloud Storage staging bucket used for sharing the generated SSH keys - and config. - softwareConfig: - type: object - additionalProperties: false - description: | - The selection and config of software inside the cluster. 
- properties: - imageVersion: - type: string - description: | - The version of the software inside the cluster. One of the - supported Cloud Dataproc Versions, such as "1.2" (including a - subminor version, such as "1.2.29"), or the "preview" version. - properties: - type: object - description: | - The key-value pairs for properties to set on the daemon config files. - optionalComponents: - type: array - uniqItems: true - description: | - The set of optional components to activate on the cluster. - items: - type: string - enum: - - COMPONENT_UNSPECIFIED - - ANACONDA - - HIVE_WEBHCAT - - JUPYTER - - ZEPPELIN - initializationActions: - type: array - uniqItems: true - description: | - A list of commands to execute on each node after the config is completed. - items: - type: object - additionalProperties: false - description: | - The executable to run on a fully configured node + the timeout - period for the executable completion. - properties: - executableFile: - type: string - description: The Cloud Storage URI of the executable file. - executableTimeout: - type: string - description: | - The executable completion timeout, e.g. "3.5s". The default value - is 10 minutes. - encryptionConfig: - type: object - additionalProperties: false - description: | - Encryption settings for the cluster. - required: - - gcePdKmsKeyName - properties: - gcePdKmsKeyName: - type: string - descritption: | - The Cloud KMS key name to use for PD disk encryption for all instances in the cluster. - master: - type: object - additionalProperties: false - description: | - The Compute Engine config settings for the master instance in the - cluster. - $ref: '#/definitions/nodeConfig' - worker: - type: object - additionalProperties: false - description: | - The Compute Engine config settings for worker instances in the cluster. 
- $ref: '#/definitions/nodeConfig' - secondaryWorker: - type: object - additionalProperties: false - description: | - The Compute Engine config settings for additional worker instances in - the cluster. - $ref: '#/definitions/nodeConfig' - -outputs: - masterInstanceNames: - type: array - description: When configured, the list of master instance names. - items: - type: string - workerInstanceNames: - type: array - description: When configured, the list of worker instance names. - items: - type: string - secondaryWorkerInstanceNames: - type: array - description: | - When configured, the list of additional worker instance names. - items: - type: string - name: - type: string - description: The cluster name. - configBucket: - type: string - description: | - A Cloud Storage staging bucket used for sharing generated SSH keys - and config. - -documentation: - - templates/dataproc/README.md - -examples: - - templates/dataproc/examples/dataproc.yaml diff --git a/dm/templates/dataproc/examples/dataproc.yaml b/dm/templates/dataproc/examples/dataproc.yaml deleted file mode 100644 index b45854b8492..00000000000 --- a/dm/templates/dataproc/examples/dataproc.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Example of the Dataproc template usage. -# -# In this example, a Dataproc cluster is created. 
- -imports: - - path: templates/dataproc/dataproc.py - name: dataproc.py - -resources: - - name: dataproc-cluster - type: dataproc.py - properties: - zone: us-central1-a - region: global - networkTags: - - http - master: - numInstances: 1 - machineType: n1-standard-8 - diskSizeGb: 100 - diskType: pd-ssd - worker: - numInstances: 2 - machineType: n1-standard-4 - secondaryWorker: - numInstances: 2 - isPreemptible: true diff --git a/dm/templates/dataproc/tests/integration/dataproc.bats b/dm/templates/dataproc/tests/integration/dataproc.bats deleted file mode 100755 index e4cb3198810..00000000000 --- a/dm/templates/dataproc/tests/integration/dataproc.bats +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export CLUSTER_NAME="dataproc-cluster-${RAND}" - export ZONE="us-central1-a" - export TAG="test-tag-${RAND}" - export MASTER_INSTANCES="1" - export MASTER_TYPE="n1-standard-8" - export MASTER_DISK_SIZE="100" - export MASTER_DISK_TYPE="pd-ssd" - export WORKER_INSTANCES="2" - export WORKER_TYPE="n1-standard-4" - export SECONDARY_WORKER_INSTANCES="1" - export SECONDARY_WORKER_PREEMPTIBLE="true" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying that ${CLUSTER_NAME} settings are correct" { - run gcloud dataproc clusters describe "${CLUSTER_NAME}" \ - --format="yaml(config.gceClusterConfig)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${ZONE}" ]] - [[ "$output" =~ "${TAG}" ]] -} - -@test "Verifying that master group settings are correct" { - run gcloud dataproc clusters describe "${CLUSTER_NAME}" \ - --format="yaml(config.masterConfig)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "numInstances: ${MASTER_INSTANCES}" ]] - [[ "$output" =~ "bootDiskSizeGb: ${MASTER_DISK_SIZE}" ]] - [[ "$output" =~ "bootDiskType: ${MASTER_DISK_TYPE}" ]] - [[ "$output" =~ "${MASTER_TYPE}" ]] -} - -@test "Verifying that worker group settings are correct" { - run gcloud dataproc clusters describe "${CLUSTER_NAME}" \ - --format="yaml(config.workerConfig)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "numInstances: ${WORKER_INSTANCES}" ]] - [[ "$output" =~ "${WORKER_TYPE}" ]] - [[ "$output" =~ "bootDiskSizeGb: 500" ]] # Default size - [[ "$output" =~ "bootDiskType: pd-standard" ]] # Default type -} - -@test "Verifying that secondary worker group settings are correct" { - run gcloud dataproc clusters describe "${CLUSTER_NAME}" \ - --format="yaml(config.secondaryWorkerConfig)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "numInstances: ${SECONDARY_WORKER_INSTANCES}" ]] - [[ "$output" =~ "${WORKER_TYPE}" ]] # Copied from worker node - [[ "$output" =~ "isPreemptible: ${SECONDARY_WORKER_PREEMPTIBLE}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete 
"${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/dataproc/tests/integration/dataproc.yaml b/dm/templates/dataproc/tests/integration/dataproc.yaml deleted file mode 100644 index 9d03c3c2d5e..00000000000 --- a/dm/templates/dataproc/tests/integration/dataproc.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Test of the Dataproc template. -# -# Variables: -# RAND: a random string used by the testing suite. -# - -imports: - - path: templates/dataproc/dataproc.py - name: dataproc.py - -resources: - - name: ${CLUSTER_NAME} - type: dataproc.py - properties: - zone: ${ZONE} - networkTags: - - ${TAG} - master: - numInstances: ${MASTER_INSTANCES} - machineType: ${MASTER_TYPE} - diskSizeGb: ${MASTER_DISK_SIZE} - diskType: ${MASTER_DISK_TYPE} - worker: - numInstances: ${WORKER_INSTANCES} - machineType: ${WORKER_TYPE} - secondaryWorker: - numInstances: ${SECONDARY_WORKER_INSTANCES} - isPreemptible: ${SECONDARY_WORKER_PREEMPTIBLE} diff --git a/dm/templates/dns_managed_zone/README.md b/dm/templates/dns_managed_zone/README.md deleted file mode 100644 index ac80d5cc690..00000000000 --- a/dm/templates/dns_managed_zone/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Cloud DNS Managed Zone - -This template creates a managed zone in the Cloud DNS (Domain Name System). - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [dns.admin](https://cloud.google.com/dns/access-control) IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [gcp-types/dns-v1:managedZones](https://cloud.google.com/dns/docs/reference/v1/managedZones) - -### Properties - -See the `properties` section in the schema file(s): -- [Cloud DNS Managed Zone](dns_managed_zone.py.schema) - -### Usage - -1. 
Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/dns_managed_zone.yaml](examples/dns_managed_zone.yaml): - -```shell - cp templates/dns_managed_zone/examples/dns_managed_zone.yaml my_dns_managed_zone.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_dns_managed_zone.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_dns_managed_zone.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples -- [Cloud DNS Managed Zone](examples/dns_managed_zone.yaml) -- [Cloud DNS Managed Zone with legacy property](examples/dns_managed_zone_legacy.yaml) -- [Managed Zone with `public visibility`](examples/dns_managed_zone_public.yaml) -- [Managed Zone with `private visibility`](examples/dns_managed_zone_private.yaml) -- [Managed Zone with `private visibility config`](examples/dns_managed_zone_private_visibility_config.yaml) - -## Tests Cases -- [Simple Managed Zone Test](tests/integration/dns_mz_simple.bats) -- [Backward Compatibility Test](tests/integration/dns_mz_bkwrd_cmptb.bats) -- [Managed Zone with `public visibility`](tests/integration/dns_mz_public.bats) -- [Managed Zone with `private visibility`](tests/integration/dns_mz_private.bats) -- [Managed Zone with `private visibility config`](tests/integration/dns_mz_prvt_vsblt_cfg.bats) -- [Managed Zone with `cross-project 
reference`](tests/integration/dns_mz_cross_project.bats) \ No newline at end of file diff --git a/dm/templates/dns_managed_zone/dns_managed_zone.py b/dm/templates/dns_managed_zone/dns_managed_zone.py deleted file mode 100644 index abc203b5d16..00000000000 --- a/dm/templates/dns_managed_zone/dns_managed_zone.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""This template creates a managed zone resource in the Cloud DNS.""" - - -def generate_config(context): - """Entry point for the deployment resources.""" - # Backward Compatibility with the old property `zoneName` - try: - managed_zone_name = context.properties['zoneName'] - except KeyError: - managed_zone_name = context.properties.get('name', context.env['name']) - dnsname = context.properties['dnsName'] - managed_zone_description = context.properties['description'] - name_servers = '$(ref.' 
+ context.env['name'] + '.nameServers)' - project_id = context.properties.get('project', context.env['project']) - - resources = [] - outputs = [ - { - 'name': 'dnsName', - 'value': dnsname - }, - { - 'name': 'managedZoneDescription', - 'value': managed_zone_description - }, - { - 'name': 'nameServers', - 'value': name_servers - }, - { - 'name': 'managedZoneName', - 'value': managed_zone_name - } - ] - - managed_zone = { - 'name': context.env['name'], - # https://cloud.google.com/dns/docs/reference/v1/managedZones - 'type': 'gcp-types/dns-v1:managedZones', - 'properties': { - 'name': managed_zone_name, - 'dnsName': dnsname, - 'description': managed_zone_description, - 'project_id': project_id - } - } - - # making resources and outputs additional properties - for prop in context.properties: - if prop not in managed_zone['properties']: - managed_zone['properties'][prop] = context.properties[prop] - outputs.append( - { - 'name': prop, - 'value': context.properties[prop] - } - ) - resources.append(managed_zone) - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/dns_managed_zone/dns_managed_zone.py.schema b/dm/templates/dns_managed_zone/dns_managed_zone.py.schema deleted file mode 100644 index c54cab3c4c2..00000000000 --- a/dm/templates/dns_managed_zone/dns_managed_zone.py.schema +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -info: - title: Cloud DNS Managed Zone - author: Source Group Inc. - version: 1.0.0 - description: | - Creates a managed zone in the Cloud DNS. - - For more information on this resource: - - https://cloud.google.com/dns/zones/ - - APIs endpoints used by this template: - - gcp-types/dns-v1:managedZones => - https://cloud.google.com/dns/docs/reference/v1/managedZones - -# Note: Supported Backward Compatibility with the old property `zoneName` -oneOf: - - required: - - dnsName - - zoneName - - required: - - dnsName - - name - -additionalProperties: false - -properties: - project: - type: string - description: | - The Project ID for Cross-Project Reference. - zoneName: - type: string - pattern: ^[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?$ - description: | - Old resource name to support backward compatablility. - Value is rescricted by API pattern for `resource.name` - The name must be 1-63 characters long, must begin with a letter, end - with a letter or digit, and only contain lowercase letters, digits or dashes. - description: - type: string - pattern: ^.{0,1023}$ - description: | - A description of the managed zone. A mutable string, max 1024 characters - long. Associated with the resource for users' convenience; does not affect - managed zone's function. - dnsName: - type: string - pattern: ^([(a-z)\d\-]{1,62}\.){1,3}([(a-z)\d\-]{1,61}){0,1}\.$ - description: | - The DNS name of the managed zone; for example, "example.com." - Make sure that the value ends with a period "." - dnssecConfig: - type: object - description: DNSSEC configuration. - additionalProperties: false - required: - - kind - - state - - defaultKeySpecs - proeprties: - defaultKeySpecs: - type: array - uniqueItems: true - description: | - Specifies parameters that will be used for generating initial DnsKeys - for this ManagedZone. Output only while state is not OFF. 
- items: - type: object - additionalProperties: false - required: - - kind - - algorithm - - keyType - - keyLength - properties: - algorithm: - oneOf: - - type: string - pattern: ^ecdsap(256|384)sha(256|384)$ - - type: string - pattern: ^rsasha(1|256|512)$ - description: | - String mnemonic specifying the DNSSEC algorithm of this key. - Acceptable values are: - - "ecdsap256sha256" - - "ecdsap384sha384" - - "rsasha1" - - "rsasha256" - - "rsasha512" - keyLength: - type: integer - description: Length of the keys in bits. - keyType: - type: string - pattern: ^(key|zone)Signing$ - description: | - Specifies whether this is a key signing key (KSK) or a zone - signing key (ZSK). Key signing keys have the Secure Entry Point - flag set and, when active, will only be used to sign resource - record sets of type DNSKEY. Zone signing keys do not have the - Secure Entry Point flag set and will be used to sign all other - types of resource record sets. - Acceptable values are: - - "keySigning" - - "zoneSigning" - kind: - type: string - pattern: ^dns#managedZoneDnsSecConfig$ - default: "dns#managedZoneDnsSecConfig" - description: | - Identifies what kind of resource this is. - Value: the fixed string "dns#managedZoneDnsSecConfig". - nonExistence: - type: string - description: | - Specifies the mechanism used to provide authenticated - denial-of-existence responses. Output only while state is not OFF. - Acceptable values are: - - "nsec" - - "nsec3" - pattern: ^nsec3?$ - state: - type: string - pattern: ^(on|off|transfer)$ - description: | - Specifies whether DNSSEC is enabled, and what mode it is in. - Acceptable values are: - - "off" - - "on" - - "transfer" - kind: - type: string - pattern: ^dns#managedZone$ - default: "dns#managedZone" - description: | - Identifies what kind of resource this is. - Value is the fixed string "dns#managedZone". - labels: - type: object - description: User labels. 
- propertyNames: - type: string - name: - type: string - pattern: ^[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?$ - description: | - User assigned name for this resource. Must be unique within the project. - The name must be 1-63 characters long, must begin with a letter, end - with a letter or digit, and only contain lowercase letters, digits or dashes. - nameServerSet: - type: string - description: | - Optionally specifies the NameServerSet for this ManagedZone. A - NameServerSet is a set of DNS name servers that all host the same - ManagedZones. Most users will leave this field unset. - nameServers: - type: array - description: | - Delegate your managed_zone to these virtual name servers; defined by the - server (output only) - privateVisibilityConfig: - type: object - description: | - For privately visible zones, the set of Virtual Private Cloud resources - that the zone is visible from. - additionalProperties: false - properties: - kind: - type: string - pattern: ^dns#managedZonePrivateVisibilityConfig$ - description: | - Identifies what kind of resource this is. - Value: the fixed string "dns#managedZonePrivateVisibilityConfig" - networks: - type: array - items: - type: object - additionalProperties: false - required: - - kind - - networkUrl - properties: - kind: - type: string - pattern: ^dns#managedZonePrivateVisibilityConfigNetwork$ - description: | - Identifies what kind of resource this is. - Value: the fixed string "dns#managedZonePrivateVisibilityConfigNetwork". - networkUrl: - type: string - pattern: ^https:\/\/www.googleapis.com\/compute\/v1\/projects\/[a-zA-Z0-9_-]+\/global\/networks\/[a-zA-Z0-9_-]+$ - description: | - The fully qualified URL of the VPC network to bind to. This should be formatted - like https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} - visibility: - type: string - pattern: ^(public|private)$ - description: | - The zone's visibility. 
Public zones are exposed to the Internet, while - private zones are visible only to Virtual Private Cloud resources. - Acceptable values are "private" and "public". - -outputs: - dnsName: - type: string - description: The DNS name of the managed zone. - managedZoneDescription: - type: string - description: The description of the managed zone. - nameServers: - type: array - description: | - The list of nameservers that will be authoritative for this domain. - managedZoneName: - type: string - description: The managed zone's resource name. - visibility: - type: string - description: | - The zone's visibility. Public zones are exposed to the Internet, - while private zones are visible only to Virtual Private Cloud - resources. - privateVisibilityConfig: - type: object - description: | - For privately visible zones, the set of Virtual Private Cloud - resources that the zone is visible from. - dnssecConfig: - type: object - description: DNSSEC configuration. - -documentation: - - templates/dns_managed_zone/README.md - -examples: - - templates/dns_managed_zone/examples/dns_managed_zone.yaml - - templates/dns_managed_zone/examples/dns_managed_zone_private.yaml - - templates/dns_managed_zone/examples/dns_managed_zone_private_visibility_config.yaml - - templates/dns_managed_zone/examples/dns_managed_zone_public.yaml diff --git a/dm/templates/dns_managed_zone/examples/dns_managed_zone.yaml b/dm/templates/dns_managed_zone/examples/dns_managed_zone.yaml deleted file mode 100644 index 5755fdb2a61..00000000000 --- a/dm/templates/dns_managed_zone/examples/dns_managed_zone.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Example of the DNS managed zone template usage. -# -# In this example, a DNS managed zone is created with the use of -# the `name` and `dnsName` properties. 
- -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: test-managed-zone - type: dns_managed_zone.py - properties: - name: test-managed-zone - dnsName: foobar.local. - description: 'My foobar DNS Managed Zone' diff --git a/dm/templates/dns_managed_zone/examples/dns_managed_zone_legacy.yaml b/dm/templates/dns_managed_zone/examples/dns_managed_zone_legacy.yaml deleted file mode 100644 index 0f2dcb6e104..00000000000 --- a/dm/templates/dns_managed_zone/examples/dns_managed_zone_legacy.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Example of the DNS managed zone template usage. -# -# In this example, a DNS managed zone is created with the use of -# the old `zoneName` and `dnsName` properties. - -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: test-managed-zone - type: dns_managed_zone.py - properties: - zoneName: test-managed-zone - dnsName: foobar.local. - description: 'My foobar DNS Managed Zone' diff --git a/dm/templates/dns_managed_zone/examples/dns_managed_zone_private.yaml b/dm/templates/dns_managed_zone/examples/dns_managed_zone_private.yaml deleted file mode 100644 index 49caf906e28..00000000000 --- a/dm/templates/dns_managed_zone/examples/dns_managed_zone_private.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Example of the DNS managed zone template usage. -# -# In this example, a private DNS managed zone is created with the use of -# the `visibility` and `dnsName` properties. - -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: private-mz - type: dns_managed_zone.py - properties: - name: private-mz - dnsName: private-mz.local. 
- description: "Private DNS Managed Zone" - visibility: private diff --git a/dm/templates/dns_managed_zone/examples/dns_managed_zone_private_visibility_config.yaml b/dm/templates/dns_managed_zone/examples/dns_managed_zone_private_visibility_config.yaml deleted file mode 100644 index 9c6da977d5d..00000000000 --- a/dm/templates/dns_managed_zone/examples/dns_managed_zone_private_visibility_config.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Example of the DNS managed zone template usage. -# -# In this example, a private DNS managed zone is created with the use of -# the `visibility` and `privateVisibilityConfig` properties. -# : a valid project name. -# : a valid VPC network name. - -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: private-mz-with-visibility - type: dns_managed_zone.py - properties: - name: private-mz-with-visibility - dnsName: private-visibility.local. - description: "Private DNS Managed Zone with visibility config" - visibility: private - privateVisibilityConfig: - kind: "dns#managedZonePrivateVisibilityConfig" - networks: - - kind: "dns#managedZonePrivateVisibilityConfigNetwork" - networkUrl: "https://www.googleapis.com/compute/v1/projects//global/networks/" diff --git a/dm/templates/dns_managed_zone/examples/dns_managed_zone_public.yaml b/dm/templates/dns_managed_zone/examples/dns_managed_zone_public.yaml deleted file mode 100644 index 6fa68ebd94e..00000000000 --- a/dm/templates/dns_managed_zone/examples/dns_managed_zone_public.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Example of the DNS managed zone template usage. -# -# In this example, a Public DNS managed zone is created with the use of -# the `zoneName`, `dnsName` and `visibility` properties. - -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: public-mz - type: dns_managed_zone.py - properties: - name: public-mz - dnsName: public-test.local. 
- description: "Public DNS Managed Zone" - visibility: public diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_bkwrd_cmptb.bats b/dm/templates/dns_managed_zone/tests/integration/dns_mz_bkwrd_cmptb.bats deleted file mode 100755 index 83bb5088792..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_bkwrd_cmptb.bats +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export CLOUDDNS_ZONE_NAME="test-managed-zone-${RAND}" - export CLOUDDNS_DNS_NAME="${RAND}.com." - export CLOUDDNS_DESCRIPTION="Managed DNS Zone for Testing" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < templates/dns_managed_zone/tests/integration/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. 
- if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verify if a managed zone with name $CLOUDDNS_ZONE_NAME was created" { - run gcloud dns managed-zones list --format=flattened \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} - -@test "Verify if a DNS named ${CLOUDDNS_DNS_NAME} was created" { - run gcloud dns managed-zones list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_DNS_NAME}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - -q --project "${CLOUD_FOUNDATION_PROJECT_ID}" - run gcloud dns managed-zones list - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} \ No newline at end of file diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_bkwrd_cmptb.yaml b/dm/templates/dns_managed_zone/tests/integration/dns_mz_bkwrd_cmptb.yaml deleted file mode 100644 index 48e35ae5680..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_bkwrd_cmptb.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Test Case: Backward Compatibility -# Use Case: -# You have updated CFT code base up to the latest version and now it works -# with your old-style written templates in a slightly different way. -# -# F.e.: `zoneName` property is now replaced by `name` to align syntax with -# the naming convention of the API. 
- -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: ${CLOUDDNS_ZONE_NAME}-resource - type: dns_managed_zone.py - properties: - zoneName: ${CLOUDDNS_ZONE_NAME} - dnsName: ${CLOUDDNS_DNS_NAME} - description: ${CLOUDDNS_DESCRIPTION} diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_cross_project.bats b/dm/templates/dns_managed_zone/tests/integration/dns_mz_cross_project.bats deleted file mode 100755 index 2d1b32274c1..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_cross_project.bats +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export CLOUDDNS_ZONE_NAME="test-managed-zone-${RAND}" - export CLOUDDNS_DNS_NAME="${RAND}.com." - export CLOUDDNS_DESCRIPTION="Managed DNS Zone for Testing" -fi - -if [ -z "${CLOUDDNS_CROSS_PROJECT_ID}" ]; then - echo "CLOUDDNS_CROSS_PROJECT_ID is not set, nothing to test." 
>&2 - exit 1 -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < templates/dns_managed_zone/tests/integration/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verify if a managed zone with name $CLOUDDNS_ZONE_NAME was created" { - run gcloud dns managed-zones list --format=flattened \ - --project "${CLOUDDNS_CROSS_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} - -@test "Verify if a DNS named ${CLOUDDNS_DNS_NAME} was created" { - run gcloud dns managed-zones list --project "${CLOUDDNS_CROSS_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_DNS_NAME}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - -q --project "${CLOUD_FOUNDATION_PROJECT_ID}" - run gcloud dns managed-zones list - [[ "$status" -eq 0 ]] - [[ ! 
"$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_cross_project.yaml b/dm/templates/dns_managed_zone/tests/integration/dns_mz_cross_project.yaml deleted file mode 100644 index 5e3b394057e..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_cross_project.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Test Case: Cross-Project Reference -# Use Case: -# You have multiple projects with dependancies on each other. -# Like one project assumes a presence of DNS Zone in another in order -# to use it as an endpoint. So within your agregated pipe-line you may want -# to provision resources in both of the projets. -# -# Please note: you should grant Editor permission on the cross-referenced -# Project to your current Google APIs account @cloudservices.gserviceaccount.com - -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: ${CLOUDDNS_ZONE_NAME}-resource - type: dns_managed_zone.py - properties: - name: ${CLOUDDNS_ZONE_NAME} - dnsName: ${CLOUDDNS_DNS_NAME} - description: ${CLOUDDNS_DESCRIPTION} - project: ${CLOUDDNS_CROSS_PROJECT_ID} diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_private.bats b/dm/templates/dns_managed_zone/tests/integration/dns_mz_private.bats deleted file mode 100755 index a4c6f5ac45c..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_private.bats +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. 
-# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export CLOUDDNS_ZONE_NAME="test-managed-zone-${RAND}" - export CLOUDDNS_DNS_NAME="${RAND}.com." - export CLOUDDNS_DESCRIPTION="Managed DNS Zone for Testing" - export CLOUDDNS_VISIBILITY="private" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < templates/dns_managed_zone/tests/integration/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. 
-} - - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verify if a managed zone with name $CLOUDDNS_ZONE_NAME was created" { - run gcloud dns managed-zones list --format=flattened \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} - -@test "Verify if a DNS named ${CLOUDDNS_DNS_NAME} was created" { - run gcloud dns managed-zones list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_DNS_NAME}" ]] -} - -@test "Verify if visibility is ${CLOUDDNS_VISIBILITY}" { - run gcloud dns managed-zones describe ${CLOUDDNS_ZONE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "visibility: ${CLOUDDNS_VISIBILITY}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - -q --project "${CLOUD_FOUNDATION_PROJECT_ID}" - run gcloud dns managed-zones list - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_private.yaml b/dm/templates/dns_managed_zone/tests/integration/dns_mz_private.yaml deleted file mode 100644 index 2edaa029f93..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_private.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Test Case: Private Visibility -# Use Case: -# You want to create a Private Managed Zone, which is not exposed to Internet -# and visible only to Virtual Private Cloud resources. 
- -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: ${CLOUDDNS_ZONE_NAME}-resource - type: dns_managed_zone.py - properties: - name: ${CLOUDDNS_ZONE_NAME} - dnsName: ${CLOUDDNS_DNS_NAME} - description: ${CLOUDDNS_DESCRIPTION} - visibility: ${CLOUDDNS_VISIBILITY} diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_prvt_vsblt_cfg.bats b/dm/templates/dns_managed_zone/tests/integration/dns_mz_prvt_vsblt_cfg.bats deleted file mode 100755 index 51dae4a169e..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_prvt_vsblt_cfg.bats +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export CLOUDDNS_ZONE_NAME="test-managed-zone-${RAND}" - export CLOUDDNS_DNS_NAME="${RAND}.com." 
- export CLOUDDNS_DESCRIPTION="Managed DNS Zone for Testing" - export CLOUDDNS_VISIBILITY="private" - export CLOUDDNS_NETWORK_URL="https://www.googleapis.com/compute/v1/projects/${CLOUD_FOUNDATION_PROJECT_ID}/global/networks/default" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < templates/dns_managed_zone/tests/integration/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verify if a managed zone with name $CLOUDDNS_ZONE_NAME was created" { - run gcloud dns managed-zones list --format=flattened \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} - -@test "Verify if a DNS named ${CLOUDDNS_DNS_NAME} was created" { - run gcloud dns managed-zones list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_DNS_NAME}" ]] -} - -@test "Verify if visibility is ${CLOUDDNS_VISIBILITY}" { - run gcloud dns managed-zones describe ${CLOUDDNS_ZONE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "visibility: ${CLOUDDNS_VISIBILITY}" ]] -} - -@test "Verify if networkUrl is ${CLOUDDNS_NETWORK_URL}" { - run gcloud dns managed-zones 
describe ${CLOUDDNS_ZONE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "networkUrl: ${CLOUDDNS_NETWORK_URL}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - -q --project "${CLOUD_FOUNDATION_PROJECT_ID}" - run gcloud dns managed-zones list - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_prvt_vsblt_cfg.yaml b/dm/templates/dns_managed_zone/tests/integration/dns_mz_prvt_vsblt_cfg.yaml deleted file mode 100644 index 642d22e70d0..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_prvt_vsblt_cfg.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Test Case: Private Visibility -# Use Case: -# You want to create a Private Managed Zone visible only for specific -# networks of your Virtual Private Cloud. - -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: ${CLOUDDNS_ZONE_NAME}-resource - type: dns_managed_zone.py - properties: - name: ${CLOUDDNS_ZONE_NAME} - dnsName: ${CLOUDDNS_DNS_NAME} - description: ${CLOUDDNS_DESCRIPTION} - visibility: ${CLOUDDNS_VISIBILITY} - privateVisibilityConfig: - kind: "dns#managedZonePrivateVisibilityConfig" - networks: - - kind: "dns#managedZonePrivateVisibilityConfigNetwork" - networkUrl: ${CLOUDDNS_NETWORK_URL} diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_public.bats b/dm/templates/dns_managed_zone/tests/integration/dns_mz_public.bats deleted file mode 100755 index e48411694bd..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_public.bats +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. 
-RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export CLOUDDNS_ZONE_NAME="test-managed-zone-${RAND}" - export CLOUDDNS_DNS_NAME="${RAND}.com." - export CLOUDDNS_DESCRIPTION="Managed DNS Zone for Testing" - export CLOUDDNS_VISIBILITY="public" - export CLOUDDNS_NETWORKS="default" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < templates/dns_managed_zone/tests/integration/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. 
-} - - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verify if a managed zone with name $CLOUDDNS_ZONE_NAME was created" { - run gcloud dns managed-zones list --format=flattened \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} - -@test "Verify if a DNS named ${CLOUDDNS_DNS_NAME} was created" { - run gcloud dns managed-zones list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_DNS_NAME}" ]] -} - -@test "Verify if visibility is ${CLOUDDNS_VISIBILITY}" { - run gcloud dns managed-zones describe ${CLOUDDNS_ZONE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "visibility: ${CLOUDDNS_VISIBILITY}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - -q --project "${CLOUD_FOUNDATION_PROJECT_ID}" - run gcloud dns managed-zones list - [[ "$status" -eq 0 ]] - [[ ! 
"$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_public.yaml b/dm/templates/dns_managed_zone/tests/integration/dns_mz_public.yaml deleted file mode 100644 index 328e7dc7278..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_public.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Test Case: Public Visibility -# Use Case: -# You want to create a Managed Zone with Public Visibility, -# which makes it exposed to Internet - -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: ${CLOUDDNS_ZONE_NAME}-resource - type: dns_managed_zone.py - properties: - name: ${CLOUDDNS_ZONE_NAME} - dnsName: ${CLOUDDNS_DNS_NAME} - description: ${CLOUDDNS_DESCRIPTION} - visibility: ${CLOUDDNS_VISIBILITY} diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_simple.bats b/dm/templates/dns_managed_zone/tests/integration/dns_mz_simple.bats deleted file mode 100755 index ab1bf5af83c..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_simple.bats +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export CLOUDDNS_ZONE_NAME="test-managed-zone-${RAND}" - export CLOUDDNS_DNS_NAME="${RAND}.com." - export CLOUDDNS_DESCRIPTION="Managed DNS Zone for Testing" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < templates/dns_managed_zone/tests/integration/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verify if a managed zone with name $CLOUDDNS_ZONE_NAME was created" { - run gcloud dns managed-zones list --format=flattened \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} - -@test "Verify if a DNS named ${CLOUDDNS_DNS_NAME} was created" { - run gcloud dns managed-zones list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLOUDDNS_DNS_NAME}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - -q --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - run gcloud dns managed-zones list - [[ "$status" -eq 0 ]] - [[ ! 
"$output" =~ "${CLOUDDNS_ZONE_NAME}" ]] -} diff --git a/dm/templates/dns_managed_zone/tests/integration/dns_mz_simple.yaml b/dm/templates/dns_managed_zone/tests/integration/dns_mz_simple.yaml deleted file mode 100644 index 6a1a7956d9a..00000000000 --- a/dm/templates/dns_managed_zone/tests/integration/dns_mz_simple.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Test of the simplest DNS managed zone template. - -imports: - - path: templates/dns_managed_zone/dns_managed_zone.py - name: dns_managed_zone.py - -resources: - - name: ${CLOUDDNS_ZONE_NAME}-resource - type: dns_managed_zone.py - properties: - name: ${CLOUDDNS_ZONE_NAME} - dnsName: ${CLOUDDNS_DNS_NAME} - description: ${CLOUDDNS_DESCRIPTION} diff --git a/dm/templates/dns_records/README.md b/dm/templates/dns_records/README.md deleted file mode 100644 index 9b451700d9a..00000000000 --- a/dm/templates/dns_records/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# DNS Resource RecordSets - -This template creates Cloud DNS records using recordsets. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [dns.admin](https://cloud.google.com/dns/access-control) IAM role to the Deployment Manager `serviceAccount` - -## Deployment - -### Resources - -- [gcp-types/dns-v1](https://cloud.google.com/dns/api/v1/changes) - -### Properties - -See the `properties` section in the schema file(s): - -- [DNS records](dns_records.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - - ```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - ``` - -2. Go to the [dm](../../) directory: - - ```shell - cd dm - ``` - -3. 
Copy the example DM config to be used as a model for the deployment; in this case, [examples/dns_records.yaml](examples/dns_records.yaml): - - ```shell - cp templates/dns_records/examples/dns_records.yaml my_dns_records.yaml - ``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - - ```shell - vim my_dns_records.yaml # <== change values to match your GCP setup - ``` - -5. Create your deployment (replace with the relevant deployment name): - - ```shell - gcloud deployment-manager deployments create \ - --config my_dns_records.yaml - ``` - -6. In case you need to delete your deployment: - - ```shell - gcloud deployment-manager deployments delete - ``` - -## Examples - -- [DNS records](examples/dns_records.yaml) diff --git a/dm/templates/dns_records/dns_records.py b/dm/templates/dns_records/dns_records.py deleted file mode 100644 index 393416bcf8d..00000000000 --- a/dm/templates/dns_records/dns_records.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""This template creates DNS records for a managed zone.""" - - -def generate_config(context): - """ - Entry point for the deployment resources. - - DNS RecordSet is natively supported since 2019. 
- """ - - recordset = { - 'name': context.env['name'], - # https://cloud.google.com/dns/docs/reference/v1/resourceRecordSets - 'type': 'gcp-types/dns-v1:resourceRecordSets', - 'properties': { - 'name': context.properties['dnsName'], - 'managedZone': context.properties['zoneName'], - 'records': context.properties['resourceRecordSets'], - } - } - - return {'resources': [recordset]} diff --git a/dm/templates/dns_records/dns_records.py.schema b/dm/templates/dns_records/dns_records.py.schema deleted file mode 100644 index a4fa6dc4bfe..00000000000 --- a/dm/templates/dns_records/dns_records.py.schema +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Cloud DNS Records - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates DNS resource recordsets. - - For more information on this resource: - - https://cloud.google.com/dns/records/ - - APIs endpoints used by this template: - - gcp-types/dns-v1:resourceRecordSets => - https://cloud.google.com/dns/docs/reference/v1/resourceRecordSets - -additionalProperties: false - -required: - - zoneName - - dnsName - - resourceRecordSets - -properties: - zoneName: - type: string - pattern: ^[(a-z)\d\-].{1,63}$ - description: | - A user-assigned name for the managed zone. - This is required by the Cloud DNS. 
- dnsName: - type: string - pattern: ^([(a-z)\d\-]{1,62}\.){1,9}([(a-z)\d\-]{1,61}){0,1}\.$ - description: | - The DNS name of the managed zone; for example, "example.com." - A fully qualified domain name (FQDN) must end with a period "." - Must be fully compliant with RFC 1035. - resourceRecordSets: - type: array - description: | - A list of ResourceRecordSets meant to be in the zone. See - https://cloud.google.com/dns/api/v1/resourceRecordSets. - items: - type: object - description: An Individual ResourceRecordSet. - required: - - name - - type - - ttl - - rrdatas - properties: - name: - type: string - description: The name of the DNS record. Must end with dnsName. - pattern: ([(a-z)\d\-]{1,62}\.){1,9}([(a-z)\d\-]{1,61}){0,1}\.$ - kind: - type: string - description: | - Identifies what kind of resource this is. - Value: the fixed string "dns#resourceRecordSet". - pattern: ^dns#resourceRecordSet$ - default: "dns#resourceRecordSet" - rrdatas: - type: array - description: | - A list of resourceRecordSets as defined in - RFC 1035 (section 5) and RFC 1034 (section 3.6.1). - Examples - https://cloud.google.com/dns/records/json-record - items: - type: string - signatureRrdatas: - type: array - description: As defined in RFC 4034 (section 3.2). - items: - type: string - ttl: - type: integer - description: | - Number of seconds that this ResourceRecordSet can be cached by resolvers. - minimum: 0 - type: - type: string - description: | - The identifier of a supported record type. 
- https://cloud.google.com/dns/docs/overview#supported_dns_record_types - enum: - - A - - AAAA - - CAA - - CNAME - - IPSECKEY - - MX - - NAPTR - - NS - - PTR - - SOA - - SPF - - SRV - - SSHFP - - TLSA - - TXT diff --git a/dm/templates/dns_records/examples/dns_records.yaml b/dm/templates/dns_records/examples/dns_records.yaml deleted file mode 100644 index b45a11fe682..00000000000 --- a/dm/templates/dns_records/examples/dns_records.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# Example of the DNS records template usage. -# -# This example creates DNS RecordSets to manage a set of DNS records. -# The records are created for a given managed zone, defined by -# the `zoneName` property, and a DNS name defined by the `dnsName` property. - -imports: - - path: templates/dns_records/dns_records.py - name: dns_records.py - -resources: -- name: test-dns-records - type: dns_records.py - properties: - zoneName: test-managed-zone - dnsName: foobar.local. - resourceRecordSets: - - name: www.foobar.local. - type: A - ttl: 20 - rrdatas: - - 10.1.1.1 - - name: www.foobar.local. - type: AAAA - ttl: 30 - rrdatas: - - 1002:db8::8bd:2001 - - name: mail.foobar.local. - type: MX - ttl: 300 - rrdatas: - - 5 smtp.fmail.foobar.local. - - 15 smtpx.mail.foobar.local. - - 25 smtp.mail.foobar.local. - - name: txt.foobar.local. - type: TXT - ttl: 235 - rrdatas: - - '"my super awesome text record"' - - name: 2.1.0.10.foobar.local. - type: PTR - ttl: 60 - rrdatas: - - sever.foobar.com. - - name: foobar.local. - type: SPF - ttl: 21600 - rrdatas: - - v=spf1 mx:foobar.com -all - - name: sip.foobar.local. - type: SRV - ttl: 21600 - rrdatas: - - 0 5 5060 sip.foobar.local. 
diff --git a/dm/templates/dns_records/tests/integration/dns_records.bats b/dm/templates/dns_records/tests/integration/dns_records.bats deleted file mode 100755 index 09bd3340d5c..00000000000 --- a/dm/templates/dns_records/tests/integration/dns_records.bats +++ /dev/null @@ -1,267 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save to a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # test specific variables - export CLOUDDNS_ZONE_NAME="test-managedzone-${RAND}" - export CLOUDDNS_DNS_NAME="${RAND}.com." 
- export A_RECORD_NAME="${CLOUDDNS_DNS_NAME}" - export AAAA_RECORD_NAME="${CLOUDDNS_DNS_NAME}" - export A_RECORD_IP="192.0.1.1" - export AAAA_RECORD_IP="1002:db8::8bd:2001" - export MX_RECORD_NAME="${CLOUDDNS_DNS_NAME}" - export MX_RECORD="25 smtp.mail.${CLOUDDNS_DNS_NAME}" - export TXT_RECORD_NAME="${CLOUDDNS_DNS_NAME}" - export TXT_RECORD="'\"my super awesome text record\"'" - export PTR_RECORD_NAME="${CLOUDDNS_DNS_NAME}" - export PTR_RECORD="server.${CLOUDDNS_DNS_NAME}" - export SPF_RECORD_NAME="${CLOUDDNS_DNS_NAME}" - export SPF_RECORD="'\"v=spf1 mx:${RAND}.com -all\"'" - export SRV_RECORD_NAME="${CLOUDDNS_DNS_NAME}" - export SRV_RECORD="0 5 5060 ${SRV_RECORD_NAME}" - -fi - - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - # Create DNS Managed Zone - gcloud dns managed-zones create --dns-name="${CLOUDDNS_DNS_NAME}" \ - --description="Test managed zone" "${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - # Delete DNS Managed Zone - echo "Deleting cloud DNS managed zone: ${CLOUDDNS_ZONE_NAME}" - gcloud dns managed-zones delete "${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment: ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "A record $A_RECORD_NAME is created " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(A)" \ - --format="csv[no-heading](name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${A_RECORD_NAME}" ]] -} - -@test "A record IP ${A_RECORD_IP} is in rrdatas " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(A)" \ - --format="csv[no-heading](DATA)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${A_RECORD_IP}" ]] -} - -@test "A record ${A_RECORD_NAME} has TTL set to 20 " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(A)" \ - --format="csv[no-heading](TTL)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "20" ]] -} - -@test "AAAA record ${AAAA_RECORD_NAME} is created " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(AAAA)" \ - --format="csv[no-heading](name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${AAAA_RECORD_NAME}" ]] -} - -@test "AAAA record IP ${AAAA_RECORD_IP} is in rrdatas " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(AAAA)" \ - --format="csv[no-heading](DATA)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${AAAA_RECORD_IP}" ]] -} - -@test "AAAA record ${AAAA_RECORD_NAME} has TTL set to 30 " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(AAAA)" \ - --format="csv[no-heading](TTL)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "30" ]] -} - -@test "MX record 
${MX_RECORD_NAME} is created" { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(MX)" \ - --format="csv[no-heading](name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${MX_RECORD_NAME}" ]] -} - -@test "MX record ${MX_RECORD} is in rrdatas " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(MX)" \ - --format="csv[no-heading](DATA)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${MX_RECORD}" ]] -} - -@test "MX record ${MX_RECORD} TTL is set to 300 " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(MX)" \ - --format="csv[no-heading](TTL)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "300" ]] -} - -@test "TXT record ${TXT_RECORD_NAME} is created" { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(TXT)" \ - --format="csv[no-heading](name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${TXT_RECORD_NAME}" ]] -} - -@test "TXT record has data ${TXT_RECORD} in rrdatas " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(TXT)" \ - --format="csv[no-heading](DATA)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "my super awesome text record" ]] -} - -@test "TXT record: ${TXT_RECORD_NAME} has TTL set to 235 " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(TXT)" \ - --format="csv[no-heading](TTL)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "235" ]] -} - -@test "PTR record ${PTR_RECORD_NAME} is created " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(PTR)" \ - --format="csv[no-heading](name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ 
"${PTR_RECORD_NAME}" ]] -} - -@test "PTR record has data ${PTR_RECORD} in rrdatas " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(PTR)" \ - --format="csv[no-heading](DATA)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${PTR_RECORD}" ]] -} - -@test "PTR record ${PTR_RECORD_NAME} has TTL set to 60 " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(PTR)" \ - --format="csv[no-heading](TTL)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "60" ]] -} - -@test "SPF record ${SPF_RECORD_NAME} is created " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(SPF)" \ - --format="csv[no-heading](name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${SPF_RECORD_NAME}" ]] -} - -@test "SPF record has data ${SPF_RECORD} in rrdatas " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(SPF)" \ - --format="csv[no-heading](DATA)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "v=spf1" ]] -} - -@test "SPF record ${SPF_RECORD_NAME} has TTL set to 21600 " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(SPF)" \ - --format="csv[no-heading](TTL)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "21600" ]] -} - -@test "SRV record ${SRV_RECORD_NAME} is created " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(SRV)" \ - --format="csv[no-heading](name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${SRV_RECORD_NAME}" ]] -} - -@test "SRV record has data ${SRV_RECORD} in rrdatas" { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(SRV)" \ - 
--format="csv[no-heading](DATA)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${SRV_RECORD}" ]] -} - -@test "SRV record ${SRV_RECORD_NAME} has TTL set to 21600 " { - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="type=(SRV)" \ - --format="csv[no-heading](TTL)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "21600" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - -q --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - run gcloud dns record-sets list --zone="${CLOUDDNS_ZONE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --format=flattened - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "${A_RECORD_IP}" ]] - [[ ! "$output" =~ "${AAAA_RECORD_IP}" ]] -} - diff --git a/dm/templates/dns_records/tests/integration/dns_records.yaml b/dm/templates/dns_records/tests/integration/dns_records.yaml deleted file mode 100644 index c637294efb5..00000000000 --- a/dm/templates/dns_records/tests/integration/dns_records.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# Test of the DNS records template. -# - -imports: - - path: templates/dns_records/dns_records.py - name: dns_records.py - -resources: -- name: ${CLOUDDNS_ZONE_NAME} - type: dns_records.py - properties: - zoneName: ${CLOUDDNS_ZONE_NAME} - dnsName: ${CLOUDDNS_DNS_NAME} - resourceRecordSets: - - name: ${A_RECORD_NAME} - type: A - ttl: 20 - rrdatas: - - ${A_RECORD_IP} - - name: ${AAAA_RECORD_NAME} - type: AAAA - ttl: 30 - rrdatas: - - ${AAAA_RECORD_IP} - - name: ${MX_RECORD_NAME} - type: MX - ttl: 300 - rrdatas: - - ${MX_RECORD} - - 10 smtp.xmail.${RAND}.com. - - 15 smtp.mail.dev.${RAND}.com. 
- - name: ${TXT_RECORD_NAME} - type: TXT - ttl: 235 - rrdatas: - - ${TXT_RECORD} - - name: ${PTR_RECORD_NAME} - type: PTR - ttl: 60 - rrdatas: - - ${PTR_RECORD} - - name: ${SPF_RECORD_NAME} - type: SPF - ttl: 21600 - rrdatas: - - ${SPF_RECORD} - - name: ${SRV_RECORD_NAME} - type: SRV - ttl: 21600 - rrdatas: - - ${SRV_RECORD} diff --git a/dm/templates/external_load_balancer/README.md b/dm/templates/external_load_balancer/README.md deleted file mode 100644 index 0f9779bd76d..00000000000 --- a/dm/templates/external_load_balancer/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# External Load Balancer - -This template creates an HTTP(S), SSL Proxy, or TCP Proxy external load balancer. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.loadBalancerAdmin](https://cloud.google.com/compute/docs/access/iam) - IAM role to the Deployment Manager service account -- For using the TCP Proxy load balancing, request access to the Compute ALPHA features - from the Cloud [Support](https://cloud.google.com/support/) - -## Deployment - -### Resources - -- [compute.v1.forwardingRule](https://cloud.google.com/compute/docs/reference/latest/forwardingRules) -- [compute.v1.targetHttpProxy](https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies) -- [compute.v1.targetHttpsProxy](https://cloud.google.com/compute/docs/reference/latest/targetHttpsProxies) -- [compute.alpha.targetTcpProxy](https://www.googleapis.com/discovery/v1/apis/compute/alpha/rest) -- [compute.v1.targetSslProxy](https://cloud.google.com/compute/docs/reference/latest/targetSslProxies) -- [compute.v1.backendService](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) -- [compute.v1.sslCertificate](https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates) -- [compute.v1.urlMap](https://cloud.google.com/compute/docs/reference/rest/v1/urlMaps) - -### 
Properties - -See the `properties` section in the schema file(s): - -- [External Load Balancer](external_load_balancer.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this - case, [examples/external\_load\_balancer.yaml](examples/external_load_balancer.yaml): - -```shell - cp templates/external_load_balancer/examples/external_load_balancer.yaml \ - my_external_load_balancer.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for - properties, refer to the schema files listed above): - -```shell - vim my_external_load_balancer.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace \ with the relevant - deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_external_load_balancer.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [External HTTP Load Balancer](examples/external_load_balancer_http.yaml) -- [External HTTPS Load Balancer](examples/external_load_balancer_https.yaml) -- [External SSL Load Balancer](examples/external_load_balancer_ssl.yaml) -- [External TCP Load Balancer](examples/external_load_balancer_tcp.yaml) diff --git a/dm/templates/external_load_balancer/examples/external_load_balancer_http.yaml b/dm/templates/external_load_balancer/examples/external_load_balancer_http.yaml deleted file mode 100644 index 6b981fa002c..00000000000 --- a/dm/templates/external_load_balancer/examples/external_load_balancer_http.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Example of the external load balancer template usage. 
-# -# This example creates an external HTTP load balancer. -# -# Replace the following placeholders with valid values: -# : a URL of the HTTP healthcheck -# : a URL of the instance group to accept -# incoming traffic at port 80 - -imports: - - path: templates/external_load_balancer/external_load_balancer.py - name: external_load_balancer.py - -resources: - - name: example-http-elb - type: external_load_balancer.py - properties: - portRange: 80 - backendServices: - - resourceName: default-backend-service - sessionAffinity: GENERATED_COOKIE - affinityCookieTtlSec: 1000 - portName: http - healthCheck: - backends: - - group: - balancingMode: UTILIZATION - maxUtilization: 0.8 - urlMap: - defaultService: default-backend-service diff --git a/dm/templates/external_load_balancer/examples/external_load_balancer_https.yaml b/dm/templates/external_load_balancer/examples/external_load_balancer_https.yaml deleted file mode 100644 index a3edef9f3be..00000000000 --- a/dm/templates/external_load_balancer/examples/external_load_balancer_https.yaml +++ /dev/null @@ -1,57 +0,0 @@ -# Example of the external load balancer template usage. -# -# This example creates an external HTTPS load balancer with multiple backend -# services. 
-# -# Replace the following placeholders with valid values: -# : a URL of the HTTPS healthcheck -# : a URL of the first instance group to accept -# incoming traffic at port 443 -# : a URL of the second instance group to accept -# incoming traffic at port 443 -# : A name of the host from which the /media path will be -# served -# : contents of the certificate file -# : contents of the private key file - -imports: - - path: templates/external_load_balancer/external_load_balancer.py - name: external_load_balancer.py - -resources: - - name: example-https-elb - type: external_load_balancer.py - properties: - portRange: 443 - backendServices: - - resourceName: static-backend-service - portName: https - healthCheck: - backends: - - group: - balancingMode: UTILIZATION - maxUtilization: 0.8 - - resourceName: media-backend-service - portName: https - healthCheck: - backends: - - group: - balancingMode: UTILIZATION - maxUtilization: 0.8 - urlMap: - defaultService: static-backend-service - hostRules: - - hosts: - - - pathMatcher: media-matcher - pathMatchers: - - name: media-matcher - defaultService: static-backend-service - pathRules: - - service: media-backend-service - paths: - - /media - ssl: - certificate: - certificate: - privateKey: diff --git a/dm/templates/external_load_balancer/examples/external_load_balancer_ssl.yaml b/dm/templates/external_load_balancer/examples/external_load_balancer_ssl.yaml deleted file mode 100644 index efd92e3ad38..00000000000 --- a/dm/templates/external_load_balancer/examples/external_load_balancer_ssl.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Example of the external load balancer template usage. -# -# This example creates an SSL Proxy load balancer. 
-# -# Replace the following placeholders with valid values: -# : a URL of the SSL healthcheck -# : a URL of the instance group to accept -# incoming traffic at port 443 -# : a URL of the SslCertificate resource - -imports: - - path: templates/external_load_balancer/external_load_balancer.py - name: external_load_balancer.py - -resources: - - name: example-ssl-elb - type: external_load_balancer.py - properties: - portRange: 443 - backendServices: - - resourceName: backend-service - portName: https - healthCheck: - backends: - - group: - balancingMode: UTILIZATION - maxUtilization: 0.8 - ssl: - certificate: - url: diff --git a/dm/templates/external_load_balancer/examples/external_load_balancer_tcp.yaml b/dm/templates/external_load_balancer/examples/external_load_balancer_tcp.yaml deleted file mode 100644 index eff7753505f..00000000000 --- a/dm/templates/external_load_balancer/examples/external_load_balancer_tcp.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Example of the external load balancer template usage. -# -# This example creates an TCP Proxy load balancer. As the underlying -# targetTcpProxy resource has the ALPHA availability, you need to -# enable the Compute ALPHA API to tun this example. 
-# -# Replace the following placeholders with valid values: -# : a URL of the TCP healthcheck -# : a URL of the instance group to accept -# incoming traffic at port 80 - -imports: - - path: templates/external_load_balancer/external_load_balancer.py - name: external_load_balancer.py - -resources: - - name: example-tcp-elb - type: external_load_balancer.py - properties: - portRange: 25 - backendServices: - - resourceName: backend-service - portName: http - healthCheck: - backends: - - group: - balancingMode: UTILIZATION - maxUtilization: 0.8 diff --git a/dm/templates/external_load_balancer/external_load_balancer.py b/dm/templates/external_load_balancer/external_load_balancer.py deleted file mode 100644 index b7466487571..00000000000 --- a/dm/templates/external_load_balancer/external_load_balancer.py +++ /dev/null @@ -1,313 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates an external load balancer. """ - -import copy -from hashlib import sha1 -import json - - -def set_optional_property(destination, source, prop_name): - """ Copies the property value if present. """ - - if prop_name in source: - destination[prop_name] = source[prop_name] - - -def get_backend_service(properties, backend_spec, res_name, project_id): - """ Creates the backend service. 
""" - - name = backend_spec.get('resourceName', res_name) - backend_name = backend_spec.get('name', name) - backend_properties = { - 'name': backend_name, - 'project': project_id, - 'loadBalancingScheme': 'EXTERNAL', - 'protocol': backend_spec.get('protocol', get_protocol(properties)), - } - - backend_resource = { - 'name': name, - 'type': 'backend_service.py', - 'properties': backend_properties - } - - optional_properties = [ - 'description', - 'backends', - 'timeoutSec', - 'sessionAffinity', - 'connectionDraining', - 'backends', - 'healthCheck', - 'healthChecks', - 'portName', - 'enableCDN', - 'affinityCookieTtlSec' - ] - - for prop in optional_properties: - set_optional_property(backend_properties, backend_spec, prop) - - return [backend_resource], [ - { - 'name': 'backendServiceName', - 'value': backend_name, - }, - { - 'name': 'backendServiceSelfLink', - 'value': '$(ref.{}.selfLink)'.format(name), - }, - ] - - -def get_forwarding_rule(properties, target, res_name, project_id): - """ Creates the forwarding rule. 
""" - - name = '{}-forwarding-rule'.format(res_name) - rule_properties = { - 'name': properties.get('name', res_name), - 'project': project_id, - 'loadBalancingScheme': 'EXTERNAL', - 'target': '$(ref.{}.selfLink)'.format(target['name']), - 'IPProtocol': 'TCP', - } - - rule_resource = { - 'name': name, - 'type': 'forwarding_rule.py', - 'properties': rule_properties, - 'metadata': { - 'dependsOn': [target['name']], - }, - } - - optional_properties = [ - 'description', - 'IPAddress', - 'ipVersion', - 'portRange', - 'labels', - ] - - for prop in optional_properties: - set_optional_property(rule_properties, properties, prop) - - return [rule_resource], [ - { - 'name': 'forwardingRuleName', - 'value': rule_properties['name'], - }, - { - 'name': 'forwardingRuleSelfLink', - 'value': '$(ref.{}.selfLink)'.format(name), - }, - { - 'name': 'IPAddress', - 'value': '$(ref.{}.IPAddress)'.format(name), - }, - ] - - -def get_backend_services(properties, res_name, project_id): - """ Creates all backend services to be used by the load balancer. """ - - backend_resources = [] - backend_outputs_map = { - 'backendServiceName': [], - 'backendServiceSelfLink': [] - } - backend_specs = properties['backendServices'] - - for backend_spec in backend_specs: - backend_res_name = '{}-backend-service-{}'.format(res_name, sha1(json.dumps(backend_spec).encode('utf-8')).hexdigest()[:10]) - resources, outputs = get_backend_service(properties, backend_spec, backend_res_name, project_id) - backend_resources += resources - # Merge outputs with the same name. - for output in outputs: - backend_outputs_map[output['name']].append(output['value']) - - backend_outputs = [] - for key, value in backend_outputs_map.items(): - backend_outputs.append({'name': key + 's', 'value': value}) - - return backend_resources, backend_outputs - - -def get_ref(name, prop='selfLink'): - """ Creates reference to a property of a given resource. 
""" - - return '$(ref.{}.{})'.format(name, prop) - - -def update_refs_recursively(properties): - """ Replaces service names with the service selflinks recursively. """ - - for prop in properties: - value = properties[prop] - if prop == 'defaultService' or prop == 'service': - is_regular_name = not '.' in value and not '/' in value - if is_regular_name: - properties[prop] = get_ref(value) - elif isinstance(value, dict): - update_refs_recursively(value) - elif isinstance(value, list): - for item in value: - if isinstance(item, dict): - update_refs_recursively(item) - - -def get_url_map(properties, res_name, project_id): - """ Creates a UrlMap resource. """ - - spec = copy.deepcopy(properties) - spec['project'] = project_id - spec['name'] = properties.get('name', res_name) - update_refs_recursively(spec) - - resource = { - 'name': res_name, - 'type': 'url_map.py', - 'properties': spec, - } - - self_link = '$(ref.{}.selfLink)'.format(res_name) - - return self_link, [resource], [ - { - 'name': 'urlMapName', - 'value': '$(ref.{}.name)'.format(res_name) - }, - { - 'name': 'urlMapSelfLink', - 'value': self_link - } - ] - - -def get_target_proxy(properties, res_name, project_id, bs_resources): - """ Creates a target proxy resource. 
""" - - protocol = get_protocol(properties) - - depends = [] - if 'HTTP' in protocol: - urlMap = copy.deepcopy(properties['urlMap']) - if 'name' not in urlMap and 'name' in properties: - urlMap['name'] = '{}-url-map'.format(properties['name']) - target, resources, outputs = get_url_map( - urlMap, - '{}-url-map'.format(res_name), - project_id - ) - depends.append(resources[0]['name']) - else: - depends.append(bs_resources[0]['name']) - target = get_ref(bs_resources[0]['name']) - resources = [] - outputs = [] - - name = '{}-target'.format(res_name) - proxy = { - 'name': name, - 'type': 'target_proxy.py', - 'properties': { - 'name': '{}-target'.format(properties.get('name', res_name)), - 'project': project_id, - 'protocol': protocol, - 'target': target - }, - 'metadata': { - 'dependsOn': [depends], - }, - } - - for prop in ['proxyHeader', 'quicOverride']: - set_optional_property(proxy['properties'], properties, prop) - - outputs.extend( - [ - { - 'name': 'targetProxyName', - 'value': '$(ref.{}.name)'.format(name) - }, - { - 'name': 'targetProxySelfLink', - 'value': '$(ref.{}.selfLink)'.format(name) - }, - { - 'name': 'targetProxyKind', - 'value': '$(ref.{}.kind)'.format(name) - } - ] - ) - - if 'ssl' in properties: - ssl_spec = properties['ssl'] - proxy['properties']['ssl'] = ssl_spec - if 'certificate' in ssl_spec: - creates_new_certificate = not 'url' in ssl_spec.get('certificate') - if creates_new_certificate: - outputs.extend( - [ - { - 'name': 'certificateName', - 'value': '$(ref.{}.certificateName)'.format(name) - }, - { - 'name': 'certificateSelfLink', - 'value': '$(ref.{}.certificateSelfLink)'.format(name) - } - ] - ) - - return [proxy] + resources, outputs - - -def get_protocol(properties): - """ Finds what network protocol to use. 
""" - - is_web = 'urlMap' in properties - is_secure = 'ssl' in properties - - if is_web: - if is_secure: - return 'HTTPS' - return 'HTTP' - - if is_secure: - return 'SSL' - return 'TCP' - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - project_id = properties.get('project', context.env['project']) - - # Forwarding rule + target proxy + backend service = ELB - bs_resources, bs_outputs = get_backend_services(properties, context.env['name'], project_id) - target_resources, target_outputs = get_target_proxy(properties, context.env['name'], project_id, bs_resources) - rule_resources, rule_outputs = get_forwarding_rule( - properties, - target_resources[0], - context.env['name'], - project_id - ) - - return { - 'resources': bs_resources + target_resources + rule_resources, - 'outputs': bs_outputs + target_outputs + rule_outputs, - } diff --git a/dm/templates/external_load_balancer/external_load_balancer.py.schema b/dm/templates/external_load_balancer/external_load_balancer.py.schema deleted file mode 100644 index e347786fe3d..00000000000 --- a/dm/templates/external_load_balancer/external_load_balancer.py.schema +++ /dev/null @@ -1,641 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: External Load Balancer - author: Sourced Group Inc. 
- version: 1.1.2 - description: | - Supports creation of an HTTP(S), SSL Proxy, or TCP Proxy external load - balancer. For details, visit https://cloud.google.com/load-balancing/docs/. - -imports: - - path: ../backend_service/backend_service.py - name: backend_service.py - - path: ../forwarding_rule/forwarding_rule.py - name: forwarding_rule.py - - path: ../url_map/url_map.py - name: url_map.py - - path: ../ssl_certificate/ssl_certificate.py - name: ssl_certificate.py - - path: ../target_proxy/target_proxy.py - name: target_proxy.py - -additionalProperties: false - -oneOf: - - not: - required: - - urlMap - - ssl - portRange: - type: array - items: - - type: string - enum: - - 25 - - 43 - - 110 - - 143 - - 195 - - 443 - - 465 - - 587 - - 700 - - 993 - - 995 - - 1883 - - 5222 - - type: integer - enum: - - 25 - - 43 - - 110 - - 143 - - 195 - - 443 - - 465 - - 587 - - 700 - - 993 - - 995 - - 1883 - - 5222 - - required: - - urlMap - - required: - - ssl - -properties: - name: - type: string - description: | - The external load balancer name. This name is assigned to the - underlying forwarding rule resource. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the service. - description: - type: string - description: | - The description of the external load balancer (optional). This - description is assigned to the underlying forwarding rule resource. - IPAddress: - type: string - description: | - The IP address on behalf of which the forwarding rule serves. Can be - specified either by a literal IP address or by a URL reference to an - existing Address resource. - portRange: - type: [integer,string] - description: | - The port range; only packets addressed to ports in that range are - forwarded to the target. - labels: - type: object - description: | - Labels to apply to this instance. These can be later modified by the setLabels method. - - An object containing a list of "key": value pairs. 
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - - ipVersion: - type: string - description: The IP version of the load balancer. - enum: - - IPV4 - - IPV6 - urlMap: - type: object - additionalProperties: false - properties: - name: - type: string - description: | - Must comply with RFC1035. Specifically, the name must be 1-63 characters long and match - the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, - and all following characters must be a dash, lowercase letter, or digit, except the last character, - which cannot be a dash. - ELB name would be used if omitted. - description: - type: string - description: The resource description (optional). - defaultService: - type: string - description: | - The full or partial URL of the defaultService resource to which traffic is directed if none of the - hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, - etc. take effect prior to sending the request to the backend. However, if defaultService is specified, - defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any - weightedBackendServices, service must not be specified. - - Only one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set. - - Authorization requires one or more of the following Google IAM permissions on the specified resource defaultService: - - compute.backendBuckets.use - - compute.backendServices.use - hostRules: - type: array - uniqueItems: true - description: | - The list of HostRules to use against the URL. - items: - type: object - additionalProperties: false - properties: - description: - type: string - description: | - The resource description (optional). - hosts: - type: array - description: | - The list of host patterns to match. They must be valid hostnames, except * will match any string of - ([a-z0-9-.]*). 
In that case, * must be the first character and must be followed - in the pattern by either - or .. - items: - type: string - pathMatcher: - type: string - description: | - The name of the PathMatcher to use for matching the path portion of - the URL if the hostRule matches the URL's host portion. - pathMatchers: - type: array - uniqueItems: true - description: | - The list of the named PathMatchers to use against the URL. - items: - type: object - additionalProperties: false - properties: - name: - type: string - description: | - The name to which the PathMatcher is referred by the HostRule. - description: - type: string - description: | - The resource description (optional). - defaultService: - type: string - description: | - The full or partial URL to the BackendService resource. This will be used if none of the pathRules or - routeRules defined by this PathMatcher are matched. For example, the following are - all valid URLs to a BackendService resource: - - https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService - - compute/v1/projects/project/global/backendServices/backendService - - global/backendServices/backendService - - If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take - effect prior to sending the request to the backend. However, if defaultService is specified, - defaultRouteAction cannot contain any weightedBackendServices. - Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. - Only one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set. 
- - Authorization requires one or more of the following Google IAM permissions on the specified resource defaultService: - - compute.backendBuckets.use - - compute.backendServices.use - - Authorization requires one or more of the following Google IAM permissions on the specified resource defaultService: - - compute.backendBuckets.use - - compute.backendServices.use - pathRules: - type: array - uniqueItems: true - description: | - The list of path rules. - items: - type: object - additionalProperties: false - properties: - service: - type: string - description: | - The full or partial URL of the backend service resource to which traffic is directed if this - rule is matched. If routeAction is additionally specified, advanced routing actions like - URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service - is specified, routeAction cannot contain any weightedBackendService s. Conversely, if routeAction - specifies any weightedBackendServices, service must not be specified. - - Only one of urlRedirect, service or routeAction.weightedBackendService must be set. - - Authorization requires one or more of the following Google IAM permissions on the specified resource service: - - compute.backendBuckets.use - - compute.backendServices.use - paths: - type: array - uniqueItems: true - description: | - The list of the path patterns to match. Each pattern must - start with /. Asterisks (*) are allowed only at the end, - following the /. The string fed to the path matcher does not - include any text after the first ? or #, and those characters - are not allowed here. - items: - type: string - tests: - type: array - uniqueItems: true - description: | - The list of the expected URL mapping tests. Request to update this UrlMap - succeed only if all of the test cases pass. You can specify a maximum of - 100 tests per UrlMap. 
- items: - type: object - additionalProperties: false - properties: - description: - type: string - description: | - The test case description. - host: - type: string - description: | - The host portion of the URL. - path: - type: string - description: | - The path portion of the URL. - service: - type: string - description: | - The BackendService resource the given URL is expected to be mapped - to. - backendServices: - type: array - uniqueItems: true - description: | - Backend services to create. These services will deliver traffic to the - instance groups. - items: - type: object - additionalProperties: false - oneOf: - - required: - - healthCheck - - required: - - healthChecks - required: - - backends - properties: - resourceName: - type: string - description: Overrides resource name. - name: - type: string - description: The backend service name. Resource name is used if omitted. - description: - type: string - description: The resource description (optional). - backends: - type: array - uniqueItems: true - description: | - The list of backends (instance groups) to which the backend service - distributes traffic. - items: - type: object - additionalProperties: false - required: - - group - properties: - description: - type: string - description: The resource description (optional). - group: - type: string - description: | - The fully-qualified URL of the Instance Group resource. - balancingMode: - type: string - description: The balancing mode for the backend. - enum: - - UTILIZATION - - RATE - - CONNECTION - maxUtilization: - type: number - minimum: 0.0 - maximum: 1.0 - description: | - The ratio that defines the CPU utilization target for the - group. The default value is 0.8. Used when balancingMode is - UTILIZATION. - maxRate: - type: number - description: | - The maximum number of requests per second (RPS) for the - group. Can be used with either RATE or UTILIZATION balancing - mode. Mandatory with the RATE mode. 
For the RATE mode, either - maxRate or maxRatePerInstance must be set. - maxRatePerInstance: - type: number - description: | - The maximum number of requests per second (RPS) that a single - backend instance can handle. This is used to calculate the - capacity of the group. Can be used with any balancing mode. - For the RATE mode, either maxRate or maxRatePerInstance must - be set. - maxConnections: - type: number - description: | - The maximum number of simultaneous connections for the group. - Can be used with either CONNECTION or UTILIZATION balancing - mode. For the CONNECTION mode, either maxConnections or - maxConnectionsPerInstance must be set. - maxConnectionsPerInstance: - type: number - description: | - The maximum number of simultaneous connections that a single - backend instance can handle. This is used to calculate the - capacity of the group. Can be used in either CONNECTION or - UTILIZATION balancing modes. For the CONNECTION mode, either - maxConnections or maxConnectionsPerInstance must be set. - capacityScaler: - type: number - default: 1 - description: | - The multiplier applied to the group's maximum servicing - capacity (based on UTILIZATION, RATE, or CONNECTION). - minimum: 0 - maximum: 1 - healthCheck: - type: string - description: | - The URL of the HealthCheck, HttpHealthCheck, or HttpsHealthCheck - resource for healthchecking the backend service. - healthChecks: - type: array - uniqueItems: true - maxItems: 1 - description: | - The URL of the HealthCheck, HttpHealthCheck, or HttpsHealthCheck resource - for healthchecking the backend service. - items: - type: string - timeoutSec: - type: number - default: 30 - description: | - The number of seconds to wait for the backend response before - considering the request as failed. - protocol: - type: string - description: | - The protocol the backend service uses to communicate with backends. - The default is HTTP. 
For INTERNAL load balancing, the possible values are - TCP and UDP, and the default is TCP. - enum: - - HTTP - - HTTPS - - TCP - - UDP - - SSL - portName: - type: string - description: | - The backend port name. The same name must appear in the instance - groups referenced by this service. - enableCDN: - type: boolean - description: | - Defines whether Cloud CDN is enabled for the backend service. - sessionAffinity: - type: string - default: NONE - description: The type of the session affinity to use. - enum: - - NONE - - GENERATED_COOKIE - - CLIENT_IP - affinityCookieTtlSec: - type: integer - minimum: 0 - maximum: 86400 - description: | - The lifetime of cookies, in seconds, if sessionAffinity is - GENERATED_COOKIE. If set to 0, the cookies are non-persistent and - last only until the end of the browser session (or equivalent). - connectionDraining: - type: object - additionalProperties: false - description: The connection draining settings. - properties: - drainingTimeoutSec: - type: integer - description: | - The time period during which the instance is drained (not - accepting new connections but still processing the ones - accepted earlier). - cdnPolicy: - type: object - additionalProperties: false - description: The cloud CDN configuration for the backend service. - properties: - cacheKeyPolicy: - type: object - additionalProperties: false - description: The CacheKeyPolicy for the CdnPolicy. - properties: - includeProtocol: - type: boolean - description: | - Defines whether the HTTP and HTTPS requests are cached - separately. - includeHost: - type: boolean - description: | - If True, requests to different hosts are cached separately. - includeQueryString: - type: boolean - description: | - If True, includes query string parameters in the cache key - according to queryStringWhitelist and queryStringBlacklist. - If neither of the two is set, the entire query string is - included. If False, the query string is excluded from the - cache key entirely. 
- queryStringWhitelist: - type: array - uniqueItems: true - description: | - The names of the query string parameters to include in - cache keys. All other parameters are excluded. Specify - either queryStringWhitelist or queryStringBlacklist, not - both. '&' and '=' are percent-encoded and not treated - as delimiters. - items: - type: string - queryStringBlacklist: - type: array - uniqueItems: true - description: | - The names of the query string parameters to exclude from - the cache keys. All other parameters are included. Specify - either queryStringWhitelist or queryStringBlacklist, not - both. '&' and '=' are percent-encoded and not treated as - delimiters. - items: - type: string - signedUrlCacheMaxAgeSec: - type: string - default: 3600s - description: | - The maximum number of seconds the response to a signed URL - request is considered fresh. After this time period, the - response is revalidated before being served. When serving - responses to the signed URL requests, Cloud CDN internally - behaves as if all responses from the backend have the - "Cache-Control: public, max-age=[TTL]" header, - regardless of any existing Cache-Control header. The actual - headers served in responses are not altered. - quicOverride: - type: string - default: NONE - description: | - The QUIC override policy for the HTTPS Load Balancer's proxy - resource. Determines whether the load balancer will attempt to - negotiate QUIC with clients. Valid values are NONE, ENABLE, and - DISABLE. Enables QUIC when set to ENABLE; disables QUIC when set to - DISABLE. When set to NONE, the QUIC policy is used with no user - overrides. If no value is specified, defaults to NONE. This field - is used for the HTTPS load balancing mode, i.e., when both the URL Map - and SSL are configured. - enum: - - NONE - - ENABLE - - DISABLE - proxyHeader: - type: string - default: NONE - description: | - The type of proxy header to append before sending data to the - backend: NONE or PROXY_V1. 
The default is NONE. Used only for TCP and SSL - load balancing modes, i.e., when the URL Map is configured. - enum: - - NONE - - PROXY_V1 - ssl: - type: object - additionalProperties: false - description: | - Encryption settings for connections processed by the resource. - oneOf: - - required: - - sslCertificates - - required: - - certificate - properties: - sslCertificates: - type: array - uniqueItems: true - description: | - URLs to SslCertificate resources that are used to authenticate connections to Backends. - At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates. - - Authorization requires the following Google IAM permission on the specified resource sslCertificates: - - compute.sslCertificates.get - minItems: 0 - maxItems: 15 - items: - type: string - certificate: - type: object - additionalProperties: false - description: SSL certificate settings. - oneOf: - - required: - - url - - required: - - privateKey - - certificate - properties: - url: - type: string - description: The URL of an existing SSL certificate resource. - name: - type: string - description: The name of the SSL certificate resource. - description: - type: string - description: | - The description of the SSL certificate resource (optional). - privateKey: - type: string - description: The write-only private key in the PEM format. - certificate: - type: string - description: | - The local certificate file. The certificate must be in the PEM - format. The certificate chain must be no greater than 5 certs - long. The chain must include at least one intermediate cert. - sslPolicy: - type: string - description: | - The URL of the SslPolicy resource to be associated with this - resource. If not set, the proxy resource will have no SSL policy - configured. - - -outputs: - forwardingRuleName: - type: string - description: The name of the external load balancer's forwarding rule. 
- forwardingRuleSelfLink: - type: string - description: | - The URI (SelfLink) of the external load balancer's forwarding rule. - IPAddress: - type: string - description: | - The IP address on whose behalf the external load balancer - (the forwarding rule) operates. - backendServiceNames: - type: array - description: | - The names of the external load balancer's backend services. - backendServiceSelfLinks: - type: string - description: The URIs (SelfLinks) of the backend service resources. - targetProxyName: - type: string - description: | - The name of the target proxy resource created for the load balancer. - targetProxySelfLink: - type: string - description: | - The URI (SelfLink) of the URL target proxy resource. - targetProxyKind: - type: string - description: | - The type of the target proxy resource created for the load balancer. - certificateName: - type: string - description: | - The name of the SSL certificate, if one is to be created. - certificateSelfLink: - type: string - description: | - The URI (SelfLink) of the SSL certificate, if one is to be created. - -documentation: - - templates/external_load_balancer/README.md - -examples: - - templates/external_load_balancer/examples/external_load_balancer_http.yaml - - templates/external_load_balancer/examples/external_load_balancer_https.yaml - - templates/external_load_balancer/examples/external_load_balancer_ssl.yaml - - templates/external_load_balancer/examples/external_load_balancer_tcp.yaml diff --git a/dm/templates/external_load_balancer/tests/integration/external_load_balancer.bats b/dm/templates/external_load_balancer/tests/integration/external_load_balancer.bats deleted file mode 100755 index db060da3b00..00000000000 --- a/dm/templates/external_load_balancer/tests/integration/external_load_balancer.bats +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' 
-f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables. - export SSL_RES_NAME="test-ssl-elb-${RAND}" - export SSL_TARGET_NAME="${SSL_RES_NAME}-target" - export SSL_PORT_RANGE="443" - export SSL_PORT_NAME="https" - export SSL_PROXY_HEADER="PROXY_V1" - export SSL_BACKEND_NAME="test-ssl-backend-service-${RAND}" - export SSL_HEALTHCHECK_NAME="test-ssl-healthcheck-${RAND}" - export HTTPS_IGM_NAME="test-zonal-igm-https-${RAND}" - export HTTPS_RES_NAME="test-https-elb-${RAND}" - export HTTPS_CERT_NAME="${HTTPS_RES_NAME}-target-ssl-cert" - export HTTPS_URL_MAP_NAME="${HTTPS_RES_NAME}-url-map" - export HTTPS_FIRST_BACKEND_NAME="first-bs-${RAND}" - export HTTPS_HEALTHCHECK_NAME="test-healthcheck-https-${RAND}" - export HTTPS_PORT_RANGE="443" - export HTTPS_PORT_NAME="https" - export HTTPS_TARGET_NAME="${HTTPS_RES_NAME}-target" - export QUIC_OVERRIDE="ENABLE" - export HTTP_RES_NAME="http-elb-${RAND}" - export HTTP_NAME="http-elb-name-${RAND}" - export HTTP_URL_MAP_NAME="${HTTP_NAME}-url-map" - export HTTP_TARGET_NAME="${HTTP_NAME}-target" - export HTTP_DESCRIPTION="http-elb-description" - export HTTP_PORT_RANGE="80" - export HTTP_FIRST_BACKEND_NAME="first-http-bs-${RAND}" - export HTTP_FIRST_BACKEND_DESC="backend-service-description" - export HTTP_SECOND_BACKEND_NAME="second-http-bs-${RAND}" - export 
HTTP_PORT_NAME="http" - export HTTP_ENABLE_CDN="true" - export HTTP_HEALTHCHECK_NAME="test-healthcheck-http-${RAND}" - export HTTP_IGM_NAME="zonal-igm-http-${RAND}" - export TIMEOUT_SEC="70" - export SESSION_AFFINITY="GENERATED_COOKIE" - export SESSION_AFFINITY_TTL="1000" - export DRAINING_TIMEOUT="100" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "$output" - [[ "$status" -eq 0 ]] -} - -@test "Verifying HTTP ELB forwarding rule" { - run gcloud compute forwarding-rules describe "${HTTP_NAME}" \ - --global \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "IPProtocol: TCP" ]] - [[ "$output" =~ "loadBalancingScheme: EXTERNAL" ]] - [[ "$output" =~ "description: ${HTTP_DESCRIPTION}" ]] - [[ "$output" =~ "portRange: ${HTTP_PORT_RANGE}-${HTTP_PORT_RANGE}" ]] - [[ "$output" =~ "targetHttpProxies/${HTTP_TARGET_NAME}" ]] -} - -@test "Verifying HTTP ELB URL Map references for two backend services" { - run gcloud compute url-maps describe "${HTTP_URL_MAP_NAME}" \ - --format="value(defaultService)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${HTTP_FIRST_BACKEND_NAME}" ]] - - run gcloud compute url-maps describe 
"${HTTP_URL_MAP_NAME}" \ - --format="value(pathMatchers[0].defaultService)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${HTTP_SECOND_BACKEND_NAME}" ]] -} - -@test "Verifying HTTP ELB first backend service" { - run gcloud compute backend-services describe "${HTTP_FIRST_BACKEND_NAME}" \ - --global \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "sessionAffinity: ${SESSION_AFFINITY}" ]] - [[ "$output" =~ "affinityCookieTtlSec: ${SESSION_AFFINITY_TTL}" ]] - [[ "$output" =~ "${HTTP_IGM_NAME}" ]] - [[ "$output" =~ "drainingTimeoutSec: ${DRAINING_TIMEOUT}" ]] - [[ "$output" =~ "description: ${HTTP_FIRST_BACKEND_DESC}" ]] - [[ "$output" =~ "enableCDN: ${HTTP_ENABLE_CDN}" ]] - [[ "$output" =~ "${HTTP_HEALTHCHECK_NAME}" ]] - [[ "$output" =~ "loadBalancingScheme: EXTERNAL" ]] - [[ "$output" =~ "portName: ${HTTP_PORT_NAME}" ]] - [[ "$output" =~ "protocol: HTTP" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] -} - -@test "Verifying HTTP ELB second backend service" { - run gcloud compute backend-services describe \ - "${HTTP_SECOND_BACKEND_NAME}" --global \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying HTTPS ELB forwarding rule" { - run gcloud compute forwarding-rules describe "${HTTPS_RES_NAME}" \ - --global \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "loadBalancingScheme: EXTERNAL" ]] - [[ "$output" =~ "portRange: ${HTTPS_PORT_RANGE}-${HTTPS_PORT_RANGE}" ]] - [[ "$output" =~ "targetHttpsProxies/${HTTPS_TARGET_NAME}" ]] -} - -@test "Verifying HTTPS ELB proxy settings" { - run gcloud compute target-https-proxies describe "${HTTPS_TARGET_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "quicOverride: ${QUIC_OVERRIDE}" ]] - [[ "$output" =~ "sslCertificates/${HTTPS_CERT_NAME}" ]] - [[ "$output" =~ "urlMaps/${HTTPS_URL_MAP_NAME}" ]] -} - -@test "Verifying 
SSL ELB forwarding rule" { - run gcloud compute forwarding-rules describe "${SSL_RES_NAME}" \ - --global \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "portRange: ${SSL_PORT_RANGE}-${SSL_PORT_RANGE}" ]] - [[ "$output" =~ "targetSslProxies/${SSL_TARGET_NAME}" ]] -} - -@test "Verifying SSL ELB proxy settings" { - run gcloud compute target-ssl-proxies describe "${SSL_TARGET_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "proxyHeader: ${SSL_PROXY_HEADER}" ]] - [[ "$output" =~ "sslCertificates/${HTTPS_CERT_NAME}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - diff --git a/dm/templates/external_load_balancer/tests/integration/external_load_balancer.yaml b/dm/templates/external_load_balancer/tests/integration/external_load_balancer.yaml deleted file mode 100644 index d3939a33492..00000000000 --- a/dm/templates/external_load_balancer/tests/integration/external_load_balancer.yaml +++ /dev/null @@ -1,234 +0,0 @@ -# Test of the external load balancer template. 
- -imports: - - path: templates/external_load_balancer/external_load_balancer.py - name: external_load_balancer.py - -resources: - - name: ${SSL_RES_NAME} - type: external_load_balancer.py - properties: - portRange: ${SSL_PORT_RANGE} - backendServices: - - name: ${SSL_BACKEND_NAME} - healthChecks: - - $(ref.${SSL_HEALTHCHECK_NAME}.selfLink) - portName: ${SSL_PORT_NAME} - backends: - - group: $(ref.${HTTPS_IGM_NAME}.instanceGroup) - proxyHeader: ${SSL_PROXY_HEADER} - ssl: - certificate: - url: $(ref.${HTTPS_RES_NAME}.certificateSelfLink) - labels: - name: testlabelname - owner: megatron - - name: ${HTTP_RES_NAME} - type: external_load_balancer.py - properties: - name: ${HTTP_NAME} - description: ${HTTP_DESCRIPTION} - portRange: ${HTTP_PORT_RANGE} - backendServices: - - resourceName: ${HTTP_FIRST_BACKEND_NAME} - description: ${HTTP_FIRST_BACKEND_DESC} - timeoutSec: ${TIMEOUT_SEC} - sessionAffinity: ${SESSION_AFFINITY} - affinityCookieTtlSec: ${SESSION_AFFINITY_TTL} - connectionDraining: - drainingTimeoutSec: ${DRAINING_TIMEOUT} - portName: ${HTTP_PORT_NAME} - enableCDN: ${HTTP_ENABLE_CDN} - healthCheck: $(ref.${HTTP_HEALTHCHECK_NAME}.selfLink) - backends: - - group: $(ref.${HTTP_IGM_NAME}.instanceGroup) - - resourceName: ${HTTP_SECOND_BACKEND_NAME} - healthCheck: $(ref.${HTTP_HEALTHCHECK_NAME}.selfLink) - backends: - - group: $(ref.${HTTP_IGM_NAME}.instanceGroup) - urlMap: - defaultService: ${HTTP_FIRST_BACKEND_NAME} - hostRules: - - hosts: - - example.com - pathMatcher: example-matcher - pathMatchers: - - name: example-matcher - defaultService: ${HTTP_SECOND_BACKEND_NAME} - - - name: ${HTTPS_RES_NAME}-2 - type: external_load_balancer.py - properties: - portRange: ${HTTPS_PORT_RANGE} - quicOverride: ${QUIC_OVERRIDE} - backendServices: - - resourceName: ${HTTPS_FIRST_BACKEND_NAME}-2 - healthCheck: $(ref.${HTTPS_HEALTHCHECK_NAME}.selfLink) - portName: ${HTTPS_PORT_NAME} - backends: - - group: $(ref.${HTTPS_IGM_NAME}.instanceGroup) - urlMap: - defaultService: 
${HTTPS_FIRST_BACKEND_NAME}-2 - ssl: - sslCertificates: - - $(ref.${HTTPS_RES_NAME}.certificateSelfLink) - - - name: ${HTTPS_RES_NAME} - type: external_load_balancer.py - properties: - portRange: ${HTTPS_PORT_RANGE} - quicOverride: ${QUIC_OVERRIDE} - backendServices: - - resourceName: ${HTTPS_FIRST_BACKEND_NAME} - healthCheck: $(ref.${HTTPS_HEALTHCHECK_NAME}.selfLink) - portName: ${HTTPS_PORT_NAME} - backends: - - group: $(ref.${HTTPS_IGM_NAME}.instanceGroup) - urlMap: - defaultService: ${HTTPS_FIRST_BACKEND_NAME} - ssl: - certificate: - certificate: | - -----BEGIN CERTIFICATE----- - MIIDODCCAiACCQCqBGuEeBXJTjANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQGEwJD - QTEQMA4GA1UECAwHT250YXJpbzEQMA4GA1UEBwwHVG9yb250bzEVMBMGA1UECgwM - RXhhbXBsZSBPcmcuMRQwEgYDVQQDDAtleGFtcGxlLmNvbTAeFw0xODEwMTEyMDEy - MjVaFw0xOTEwMTEyMDEyMjVaMF4xCzAJBgNVBAYTAkNBMRAwDgYDVQQIDAdPbnRh - cmlvMRAwDgYDVQQHDAdUb3JvbnRvMRUwEwYDVQQKDAxFeGFtcGxlIE9yZy4xFDAS - BgNVBAMMC2V4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC - AQEA5yLVBWSqKRWSJrGh2nbW0j+Soy+uvDKCjSZtXIVIcWvSP+WXd6RE2zlsNee4 - UA90Jjhx6Si2MxraCNblm4MJSQg46irBn4Vmex/2iY2vD1MbMEusTNIfpDh3A27H - qdBe+WSO0uHXfmgC68M8mgTMcMLVoUsGgv8B710qBN1D9EKPZdEhYBAEhC1RFj+8 - o3krWa983QFVND4PeFJSZe0LxAg44/bHVFNKK5Ub+TPa9P3t86SpoNj9/M7DPSkh - AtqmHL+90G0gvkfvtaoNRHHdqk2X+Uz3cUXWgl8xE+X5wcWu7r3cMxPaJvuLldJa - SIcc4YIlKOB0nFDKPK+iy7LqPwIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQAdf4dG - IkEmdNmTeGPVOUis+7ziWzRPxt8Vpmuq24z4H9mIkAPo/2aLpMKH7bloeYvz8blu - 5VQZx7StoE5Sde1ss/AoaL1dVJi/dgmeN2cHy7J6POu3e9n6yXGiIh0qHlFe83nJ - RVIqtN9QGGuabGt3WGbKElMKwrCl9NGhExi/LntPFllXfTLb2pVGXH47ZihynbUj - 4S21+KnQUPjhg6Na6hP3qLVqSYtWataJFpy6DOG1wgoAWjagNc3ltGdmv6O/ZkI3 - 3vymENyn8G7n+Z1knXUXxv4rJoeiYbZ7/2/bQ8BTc/RI5qnhzO8VYmyAZrrKGZnD - W2xuikK4nQHsideP - -----END CERTIFICATE----- - privateKey: | - -----BEGIN RSA PRIVATE KEY----- - MIIEowIBAAKCAQEA5yLVBWSqKRWSJrGh2nbW0j+Soy+uvDKCjSZtXIVIcWvSP+WX - d6RE2zlsNee4UA90Jjhx6Si2MxraCNblm4MJSQg46irBn4Vmex/2iY2vD1MbMEus - 
TNIfpDh3A27HqdBe+WSO0uHXfmgC68M8mgTMcMLVoUsGgv8B710qBN1D9EKPZdEh - YBAEhC1RFj+8o3krWa983QFVND4PeFJSZe0LxAg44/bHVFNKK5Ub+TPa9P3t86Sp - oNj9/M7DPSkhAtqmHL+90G0gvkfvtaoNRHHdqk2X+Uz3cUXWgl8xE+X5wcWu7r3c - MxPaJvuLldJaSIcc4YIlKOB0nFDKPK+iy7LqPwIDAQABAoIBACHHm25gWeYLOwLg - rxDokVjE5b9ntpfWofHTDeSZrg61fVLNUSexIEcRy1jNdshsmcMEONGkm4w4fmkQ - Txo9OlwsEXVXrliL/IA+GZ/czxrkQHL8fD/17Z3oiqw7wn5074xvP9heHUpiFRsz - u2WfEeng76vU9Syr7DJ5YSy40beew48gJsfclbbAWl1GQ4s1wluoMlutCWjRKSAD - Qg1pjJQuTHDpD+PNgHrx4Xbyjyo6tGqcdt9B0crhuxwTZXUogQsRKRuMHlxxBsbm - kINhSsNf8V8iRCBtZ4FPWcq+Rk/KntNzB9NZQFmrH5hS0oQmZjzNAyzCXIaYTji7 - Ju8XDaECgYEA9hUDBBwniphLZxvIC8GHVgAFx76Xw085bksVI0jNl2yG1HgNjCNA - W7DXJnyAtJZQjaItZfvB/tMm/ZAypf77tnru2n/uRvB4uG1Yh7RSy7rhLpibvTpU - e+DHm2c6kVW6Ng4q6rFxaunpjKEaeZO8pKowUu4YGU9YaSqvIGwoPFcCgYEA8HOc - 1J5Rop56BPvJgozqQRRQ3Q3AFfzlyYEniF35twIqnehemU3nJMdVp9jbZizOcrmu - ZBma5c5P5Bjgam3SQvswTUxmbIZ2VvvXOv5aPeldNdFHrADpVmOdKwcPxQ8qx+IT - GK3rrVRkH6+JByseHhxl3igIM7fAtbd27ENDkFkCgYEA9YmhqMgu7CtpkUg3IwPH - dhgvrE6QP2EdfN+OB9bszNqM7hOb8Oh7nwGkq9Iu2gHh/nCDu+6ocwtdLERlRRxX - LI0dJwffSQlIaz0vyLg0pPOjHEtJmlZJVhHDGVy3I6zWUHlyeRr0gClFz/wv3n97 - CxKFhTns8dQp80WT2FYTD6ECgYBU0KMYSIQJNZda3Km22BflPtJLNwdzehJf4qPc - MTHdQPFhY87Cir0mtv1ayF6TiuiDhUWjX3jI6N47Wh8Gy5goMkxWZ8WVMFTb19eS - opeYURGk4x5B6MxlwZt1yvbgDrqLaQ5NXUPNjwAGQTe3hJkKDABOvZYvD/j04DMd - oZhaeQKBgGGgnxTTUTEdqZ/AsVD0NmaqauTmyjsUpmAph9oazERM729n9igob85z - KXQmD9gmtTrCuv8LGyEPFsIhlBTOlLyzHpMhI2Hd23hzQp8v09ZdDpx8SqHv0THW - y8YMreKih6+reSfC+GuOzQoKi4vTKO7wwuXYysXkg3juupqZ7Kab - -----END RSA PRIVATE KEY----- - -# Test prerequisites: - - - name: ${HTTP_IGM_NAME} - type: compute.v1.instanceGroupManager - properties: - instanceTemplate: $(ref.test-instance-template-http-${RAND}.selfLink) - zone: us-east1-c - targetSize: 1 - namedPorts: - - name: ${HTTP_PORT_NAME} - port: ${HTTP_PORT_RANGE} - - - name: ${HTTPS_IGM_NAME} - type: compute.v1.instanceGroupManager - properties: - instanceTemplate: $(ref.test-instance-template-https-${RAND}.selfLink) 
- zone: us-east1-b - targetSize: 1 - namedPorts: - - name: ${HTTPS_PORT_NAME} - port: ${HTTPS_PORT_RANGE} - - - name: test-instance-template-https-${RAND} - type: compute.v1.instanceTemplate - properties: - properties: - machineType: f1-micro - disks: - - autoDelete: true - boot: true - deviceName: boot - initializeParams: - sourceImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - type: PERSISTENT - networkInterfaces: - - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - network: global/networks/default - metadata: - items: - - key: startup-script - value: | - #! /bin/bash - sudo apt-get update - sudo apt-get install apache2 -y - sudo service apache2 restart - echo `hostname` | tee /var/www/html/index.html - sudo a2ensite default-ssl - sudo a2enmod ssl - sudo service apache2 restart - echo "https-`hostname`" | sudo tee /var/www/html/index.html - EOF" - - - name: test-instance-template-http-${RAND} - type: compute.v1.instanceTemplate - properties: - properties: - machineType: f1-micro - disks: - - autoDelete: true - boot: true - deviceName: boot - initializeParams: - sourceImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - type: PERSISTENT - networkInterfaces: - - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - network: global/networks/default - metadata: - items: - - key: startup-script - value: | - #! 
/bin/bash - sudo apt-get update - sudo apt-get install apache2 -y - sudo service apache2 restart - echo "http-`hostname`" | sudo tee /var/www/html/index.html - EOF" - - - name: ${HTTP_HEALTHCHECK_NAME} - type: compute.v1.httpHealthCheck - - - name: ${HTTPS_HEALTHCHECK_NAME} - type: compute.v1.httpsHealthCheck - - - name: ${SSL_HEALTHCHECK_NAME} - type: compute.v1.healthCheck - properties: - type: SSL - sslHealthCheck: - port: ${SSL_PORT_RANGE} diff --git a/dm/templates/firewall/README.md b/dm/templates/firewall/README.md deleted file mode 100644 index 3739cedeca2..00000000000 --- a/dm/templates/firewall/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# Firewall - -This template creates firewall rules for a network. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [network](../network/README.md) -- Grant the [compute.networkAdmin or compute.securityAdmin](https://cloud.google.com/compute/docs/access/iam) IAM role to the project service account - -## Deployment - -### Resources - -- [compute.beta.firewall](https://cloud.google.com/compute/docs/reference/rest/beta/firewalls) - - `Note:` The beta API supports the firewall log feature. - -### Properties - -See the `properties` section in the schema file(s): - -- [Firewall](firewall.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/firewall.yaml](examples/firewall.yaml): - -```shell - cp templates/firewall/examples/firewall.yaml my_firewall.yaml -``` - -4. 
Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_firewall.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_firewall.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Firewall](examples/firewall.yaml) diff --git a/dm/templates/firewall/examples/firewall.yaml b/dm/templates/firewall/examples/firewall.yaml deleted file mode 100644 index 0ce4773b071..00000000000 --- a/dm/templates/firewall/examples/firewall.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# Example of the firewall template usage. -# -# In this example, an array of firewall rules is applied to the -# 'network' VPC. -# -# Replace 'network-name' with a valid VPC network name. -# -imports: - - path: templates/firewall/firewall.py - name: firewall.py - -resources: - - name: test-firewall - type: firewall.py - properties: - network: - rules: - - name: allow-proxy-from-inside - allowed: - - IPProtocol: tcp - ports: - - "80" - - "443" - description: test rule for network-test - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 - - name: allow-dns-from-inside - allowed: - - IPProtocol: udp - ports: - - "53" - - IPProtocol: tcp - ports: - - "53" - description: test rule for network-test-network - direction: EGRESS - priority: 20 - destinationRanges: - - 8.8.8.8/32 diff --git a/dm/templates/firewall/firewall.py b/dm/templates/firewall/firewall.py deleted file mode 100644 index ffd26991500..00000000000 --- a/dm/templates/firewall/firewall.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates firewall rules for a network. """ - -from hashlib import sha1 - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - project_id = properties.get('project', context.env['project']) - network = properties.get('network') - if network: - if not ('/' in network or '.' in network): - network = 'global/networks/{}'.format(network) - else: - network = 'projects/{}/global/networks/{}'.format( - project_id, - properties.get('networkName', 'default') - ) - - resources = [] - out = {} - for i, rule in enumerate(properties['rules'], 1000): - res_name = sha1(rule['name'].encode('utf-8')).hexdigest()[:10] - - rule['network'] = network - rule['priority'] = rule.get('priority', i) - rule['project'] = project_id - resources.append( - { - 'name': res_name, - 'type': 'gcp-types/compute-v1:firewalls', - 'properties': rule - } - ) - - out[res_name] = { - 'selfLink': '$(ref.' + res_name + '.selfLink)', - 'creationTimestamp': '$(ref.' + res_name - + '.creationTimestamp)', - } - - outputs = [{'name': 'rules', 'value': out}] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/firewall/firewall.py.schema b/dm/templates/firewall/firewall.py.schema deleted file mode 100644 index aaa5e3767e5..00000000000 --- a/dm/templates/firewall/firewall.py.schema +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Firewall - author: Sourced Group Inc. - version: 1.0.0 - description: | - Deploys firewall rules. - - For more information on this resource: - https://cloud.google.com/vpc/docs/firewalls - - APIs endpoints used by this template: - - gcp-types/compute-v1:firewalls => - https://cloud.google.com/compute/docs/reference/rest/v1/firewalls - -additionalProperties: false - -required: - - rules - -allOf: - - oneOf: - - required: - - project - - required: - - network - - allOf: - - not: - required: - - project - - not: - required: - - network - - oneOf: - - required: - - networkName - - required: - - network - - allOf: - - not: - required: - - networkName - - not: - required: - - network - -properties: - project: - type: string - description: | - The project ID of the project containing firewall rules. - network: - type: string - description: | - URL of the network resource for this firewall rule. If not specified when creating a firewall rule, - the default network is used. - networkName: - type: string - description: | - The name of network to create firewalls in. - rules: - type: array - uniqueItems: True - description: | - An array of firewall rules. - items: - type: object - additionalProperties: false - properties: - name: - type: string - description: | - Name of the resource; provided by the client when the resource is created. The name must be 1-63 - characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match - the regular expression `a-z?. The first character must be a lowercase letter, and all following - characters (except for the last character) must be a dash, lowercase letter, or digit. The last character - must be a lowercase letter or digit. - Resource name would be used if omitted. - description: - type: string - description: | - An optional description of this resource. Provide this field when you create the resource. - priority: - type: integer - description: | - Priority for this rule. This is an integer between 0 and 65535, both inclusive. - Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate - higher priority. For example, a rule with priority 0 has higher precedence than a rule with priority 1. - DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have - implied rules with a priority of 65535. To avoid conflicts with the implied rules, use a priority - number less than 65535. - - If the 'priority' field value is not set in the rule, the template sets - the priority to the same value as the rule's index in the array +1000. - For example, the priority for the first rule in the array becomes '1000', - for the second rule '1001', and so on. If the 'priority' field is not set in - any of the rules in the array, the ruleset is sorted by priority automatically. - We strongly advise being consistent in your use of the 'priority' field: - either provide or skip values in all instances throughout the ruleset. - sourceRanges: - type: array - uniqueItems: True - description: | - If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in - these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags - may be set. 
If both fields are set, the rule applies to traffic that has a source IP address within - sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. - The connection does not need to match both fields for the rule to apply. Only IPv4 is supported. - items: - type: string - destinationRanges: - type: array - uniqueItems: True - description: | - If destination ranges are specified, the firewall rule applies only to traffic that has destination IP - address in these ranges. These ranges must be expressed in CIDR format. Only IPv4 is supported. - items: - type: string - sourceTags: - type: array - uniqueItems: True - description: | - If source tags are specified, the firewall rule applies only to traffic with source IPs that match the - primary network interfaces of VM instances that have the tag and are in the same VPC network. Source tags - cannot be used to control traffic to an instance's external IP address, it only applies to traffic between - instances in the same virtual network. Because tags are associated with instances, not IP addresses. - One or both of sourceRanges and sourceTags may be set. If both fields are set, the firewall applies to - traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching - tag listed in the sourceTags field. The connection does not need to match both fields - for the firewall to apply. - items: - type: string - targetTags: - type: array - uniqueItems: True - description: | - A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, - then the firewall rule applies only to instances in the VPC network that have one of those tags. - If no targetTags are specified, the firewall rule applies to all instances on the specified network. 
- items: - type: string - sourceServiceAccounts: - type: array - uniqueItems: True - description: | - If source service accounts are specified, the firewall rules apply only to traffic originating from - an instance with a service account in this list. Source service accounts cannot be used to control - traffic to an instance's external IP address because service accounts are associated with an instance, - not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, - the firewall applies to traffic that has a source IP address within the sourceRanges OR a source IP - that belongs to an instance with service account listed in sourceServiceAccount. The connection does - not need to match both fields for the firewall to apply. sourceServiceAccounts cannot be used - at the same time as sourceTags or targetTags. - items: - type: string - targetServiceAccounts: - type: array - uniqueItems: True - description: | - The email of a service account indicating the set of instances to which firewall rules apply. - items: - type: string - allowed: - type: array - uniqueItems: True - description: | - The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and - port-range tuple that describes a permitted connection. - items: - type: object - additionalProperties: false - required: - - IPProtocol - properties: - IPProtocol: - type: string - description: | - The IP protocol to which this rule applies. The protocol type is required when creating - a firewall rule. This value can either be one of the following well known protocol strings - (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. - ports: - type: array - uniqueItems: True - description: | - An optional list of ports to which this rule applies. This field is only applicable for - the UDP or TCP protocol. Each entry must be either an integer or a range. - If not specified, this rule applies to connections through any port. 
- - Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. - items: - type: string - denied: - type: array - uniqueItems: True - description: | - The list of DENY rules specified by this firewall. Each rule specifies a protocol and port-range - tuple that describes a denied connection. - items: - type: object - additionalProperties: false - required: - - IPProtocol - properties: - IPProtocol: - type: string - description: | - The IP protocol to which this rule applies. The protocol type is required when creating - a firewall rule. This value can either be one of the following well known protocol strings - (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. - ports: - type: array - uniqueItems: True - description: | - An optional list of ports to which this rule applies. This field is only applicable for - the UDP or TCP protocol. Each entry must be either an integer or a range. - If not specified, this rule applies to connections through any port. - - Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. - items: - type: string - direction: - type: string - description: | - Direction of traffic to which this firewall applies, either INGRESS or EGRESS. - The default is INGRESS. For INGRESS traffic, you cannot specify the destinationRanges field, - and for EGRESS traffic, you cannot specify the sourceRanges or sourceTags fields. - enum: - - INGRESS - - EGRESS - logConfig: - type: object - description: | - This field denotes the logging options for a particular firewall rule. - If logging is enabled, logs will be exported to Stackdriver. - required: - - enable - properties: - enable: - type: boolean - description: | - This field denotes whether to enable logging for a particular firewall rule. - disabled: - type: boolean - description: | - Denotes whether the firewall rule is disabled. When set to true, the firewall rule is not - enforced and the network behaves as if it did not exist. 
If this is unspecified, the firewall rule will be enabled. - - - -outputs: - rules: - type: array - description: | - Array of firewall rule details. For example, the output can be - referenced as: - $(ref..rules..selfLink) - items: - description: The name of the firewall rule resource. - patternProperties: - ".*": - type: object - description: Details for a firewall rule resource. - properties: - selfLink: - type: string - description: The URI (SelfLink) of the firewall rule resource. - creationTimestamp: - type: string - description: Creation timestamp in RFC3339 text format. - -documentation: - - templates/firewall/README.md - -examples: - - templates/firewall/examples/firewall.yaml diff --git a/dm/templates/firewall/tests/integration/firewall.bats b/dm/templates/firewall/tests/integration/firewall.bats deleted file mode 100644 index 5e5daf779e0..00000000000 --- a/dm/templates/firewall/tests/integration/firewall.bats +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/firewall/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - gcloud compute networks create network-test-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --description "integration test ${RAND}" \ - --subnet-mode custom - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - gcloud compute networks delete network-test-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that resources were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute firewall-rules list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "allow-proxy-from-inside" ]] - [[ "$output" =~ "allow-dns-from-inside" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - run gcloud compute firewall-rules list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "allow-proxy-from-inside" ]] - [[ ! 
"$output" =~ "allow-dns-from-inside" ]] -} diff --git a/dm/templates/firewall/tests/integration/firewall.yaml b/dm/templates/firewall/tests/integration/firewall.yaml deleted file mode 100644 index 8bcbc91ef9d..00000000000 --- a/dm/templates/firewall/tests/integration/firewall.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Test of the firewall template. -# -# Variables: -# RAND: A random string used by the testing suite - -imports: - - path: templates/firewall/firewall.py - name: firewall.py - -resources: - - name: test-firewall-${RAND} - type: firewall.py - properties: - network: network-test-${RAND} - rules: - - name: allow-proxy-from-inside - allowed: - - IPProtocol: tcp - ports: - - "80" - - "443" - description: test rule for network-test-${RAND} - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 - - name: allow-dns-from-inside - allowed: - - IPProtocol: udp - ports: - - "53" - - IPProtocol: tcp - ports: - - "53" - description: test rule for network-test-${RAND} - direction: EGRESS - priority: 20 - destinationRanges: - - 8.8.8.8/32 diff --git a/dm/templates/folder/README.md b/dm/templates/folder/README.md deleted file mode 100644 index 3557fc98c75..00000000000 --- a/dm/templates/folder/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Folder - -This template creates a folder under an organization or under a parent folder. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [resourcemanager.folderAdmin or resourcemanager.folderCreator](https://cloud.google.com/resource-manager/docs/access-control-folders) IAM role to the project service account - -## Deployment - -### Resources - -- [gcp-types/cloudresourcemanager-v2:folders](https://cloud.google.com/resource-manager/reference/rest/v2/folders/create) - - -### Properties - -See `properties` section in the schema file(s): - -- [Folder](folder.py.schema) - -### Usage - - -1. 
Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/folder.yaml](examples/folder.yaml): - -```shell - cp templates/folder/examples/folder.yaml my_folder.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_folder.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_folder.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Folder](examples/folder.yaml) diff --git a/dm/templates/folder/examples/folder.yaml b/dm/templates/folder/examples/folder.yaml deleted file mode 100644 index acf9a2348be..00000000000 --- a/dm/templates/folder/examples/folder.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Example of the Folder template usage. 
-# -# Replace the following placeholders with valid values: -# : the organization id the folder will be created under -# : the folder id the folder will be created under -# - -imports: -- path: templates/folder/folder.py - name: folder.py - -resources: -- name: my-folder - type: folder.py - properties: - folders: - - name: org-folder - orgId: organizations/ - displayName: Folder under Organization - - name: folder-folder - folderId: folders/ - displayName: Folder under Folder diff --git a/dm/templates/folder/folder.py b/dm/templates/folder/folder.py deleted file mode 100644 index 78e31909940..00000000000 --- a/dm/templates/folder/folder.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" - This template creates a folder under an organization or under a - parent folder. -""" - -from hashlib import sha1 - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - resources = [] - out = {} - for folder in context.properties.get('folders', []): - if folder.get('parent'): - parent = '{}s/{}'.format(folder['parent']['type'], folder['parent']['id']) - else: - parent = folder.get('orgId', folder.get('folderId')) - - suffix = folder.get( - 'resourceNameSuffix', - sha1('{}/folders/{}'.format(parent, folder.get('displayName')).encode('utf-8')).hexdigest()[:10] - ) - create_folder = '{}-{}'.format(context.env['name'], suffix) - resources.append( - { - 'name': create_folder, - # https://cloud.google.com/resource-manager/reference/rest/v2/folders - 'type': 'gcp-types/cloudresourcemanager-v2:folders', - 'properties': - { - 'parent': parent, - 'displayName': folder['displayName'] - } - } - ) - - out[create_folder] = { - 'name': '$(ref.{}.name)'.format(create_folder), - 'parent': '$(ref.{}.parent)'.format(create_folder), - 'displayName': '$(ref.{}.displayName)'.format(create_folder), - 'createTime': '$(ref.{}.createTime)'.format(create_folder), - 'lifecycleState': '$(ref.{}.lifecycleState)'.format(create_folder) - } - - outputs = [{'name': 'folders', 'value': out}] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/folder/folder.py.schema b/dm/templates/folder/folder.py.schema deleted file mode 100644 index f148ad5c682..00000000000 --- a/dm/templates/folder/folder.py.schema +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -info: - title: Folder - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates a folder under an organization or under a parent - folder. - - For more information on this resource: - https://cloud.google.com/resource-manager/ - - APIs endpoints used by this template: - - gcp-types/cloudresourcemanager-v2:folders => - https://cloud.google.com/resource-manager/reference/rest/v2/folders - -additionalProperties: false - -required: - - folders - -properties: - folders: - type: array - description: | - List of folders to create. - minItems: 1 - uniqueItems: True - items: - type: object - oneOf: - - required: - - orgId - - required: - - folderId - - required: - - parent - required: - - displayName - properties: - resourceNameSuffix: - type: string - description: | - Optional resource name suffix - orgId: - type: string - pattern: ^organizations\/[0-9]{8,25}$ - description: | - The organization ID. If this field is set, the folder is - created under the organization. The value must conform to the - format `organizations/`. For example, - `organizations/111122223333`. - DEPRECATED. Please use "parent" - folderId: - type: string - pattern: ^folders\/[0-9]{8,25}$ - description: | - The folder ID. If this field is set, the folder is created - under the folder specified by the ID. The value must conform - to the format `folders/`. For example, - `folders/1234567890`. - DEPRECATED. Please use "parent" - parent: - type: object - additionalProperties: false - description: The parent of the folder. - required: - - type - - id - properties: - type: - type: string - decription: The parent type (organization or folder). - enum: - - organization - - folder - default: organization - id: - type: [integer, string] - description: | - The ID of the folder's parent. - pattern: ^[0-9]{8,25}$ - displayName: - type: string - pattern: ^[a-zA-Z0-9]([a-zA-Z0-9_\- ]{0,28}[a-zA-Z0-9])?$ - description: | - The folder’s display name. 
A folder’s display name must be unique amongst its siblings, e.g. no two folders - with the same parent can share the same display name. The display name must start and end with - a letter or digit, may contain letters, digits, spaces, hyphens and underscores and - can be no longer than 30 characters. - -outputs: - folders: - type: array - description: Array of folder resource information. - items: - description: | - The name of the folder resource. For example, the output can be - referenced as: $(ref..folders..parent) - patternProperties: - ".*": - type: object - description: Details for a folder resource. - properties: - name: - type: string - description: | - Name of the folder resource in the format - `folders/`. - parent: - type: string - description: | - The resource name of the parent Folder or Organization. - displayName: - type: string - description: The folder's display name. - createTime: - type: string - description: Creation timestamp in RFC3339 text format. - lifecycleState: - type: string - description: The Folder's current lifecycle state. - -documentation: - - templates/folder/README.md - -examples: - - templates/folder/examples/folder.yaml diff --git a/dm/templates/folder/tests/integration/folder.bats b/dm/templates/folder/tests/integration/folder.bats deleted file mode 100644 index 345aedd0e32..00000000000 --- a/dm/templates/folder/tests/integration/folder.bats +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. 
-if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/folder/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function get_test_folder_id() { - # Get the test folder ID and make it available. - TEST_ORG_FOLDER_NAME=$(gcloud alpha resource-manager folders list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --organization "${CLOUD_FOUNDATION_ORGANIZATION_ID}" | \ - grep "test-org-folder-${RAND}") - - export TEST_ORG_FOLDER_NAME=`echo ${TEST_ORG_FOLDER_NAME} | cut -d ' ' -f 3` -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - gcloud alpha resource-manager folders create \ - --display-name="test-org-folder-${RAND}" \ - --organization="${CLOUD_FOUNDATION_ORGANIZATION_ID}" - get_test_folder_id - create_config - fi - - # Per-test setup steps. - get_test_folder_id -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - get_test_folder_id - gcloud alpha resource-manager folders delete "${TEST_ORG_FOLDER_NAME}" - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that a folder was created under organization in deployment ${DEPLOYMENT_NAME}" { - run gcloud alpha resource-manager folders list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --organization "${CLOUD_FOUNDATION_ORGANIZATION_ID}" - [[ "$output" =~ "Folder under Org ${RAND}" ]] -} - -@test "Verifying that a folder was created under the specified folder in deployment ${DEPLOYMENT_NAME}" { - run gcloud alpha resource-manager folders list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --folder "${TEST_ORG_FOLDER_NAME}" - [[ "$output" =~ "Folder under Folder ${RAND}" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - run gcloud run gcloud alpha resource-manager folders list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --organization "${CLOUD_FOUNDATION_ORGANIZATION_ID}" - [[ ! "$output" =~ "Folder Under Org ${RAND}" ]] - - run gcloud alpha resource-manager folders list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --folder "${TEST_ORG_FOLDER_NAME}" - [[ ! "$output" =~ "Folder Under Folder ${RAND}" ]] -} diff --git a/dm/templates/folder/tests/integration/folder.yaml b/dm/templates/folder/tests/integration/folder.yaml deleted file mode 100644 index 35a168a457c..00000000000 --- a/dm/templates/folder/tests/integration/folder.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Test of the folder template. -# -# Variables: -# RAND: A random string used by the testing suite. 
-# - -imports: -- path: templates/folder/folder.py - name: folder.py - -resources: -- name: my-org-folder-${RAND} - type: folder.py - properties: - folders: - - name: org-folder-${RAND} - orgId: organizations/${CLOUD_FOUNDATION_ORGANIZATION_ID} - displayName: Folder under Org ${RAND} - - name: folder-folder-${RAND} - folderId: folders/${TEST_ORG_FOLDER_NAME} - displayName: Folder under Folder ${RAND} diff --git a/dm/templates/forwarding_rule/README.md b/dm/templates/forwarding_rule/README.md deleted file mode 100644 index 6c5807d1de0..00000000000 --- a/dm/templates/forwarding_rule/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# Forwarding Rule - -This template creates a forwarding rule. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.admin](https://cloud.google.com/compute/docs/access/iam) or -[compute.networkAdmin](https://cloud.google.com/compute/docs/access/iam) IAM -role to the Deployment Manager service account - -## Deployment - -### Resources - -- [compute.v1.globalForwardingRule](https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules) -- [compute.v1.forwardingRule](https://cloud.google.com/compute/docs/reference/latest/forwardingRules) - -### Properties - -See the `properties` section in the schema file(s): -- [Forwarding Rule](forwarding_rule.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. 
Copy the example DM config to be used as a model for the deployment; in this -case, [examples/forwarding\_rule\_global.yaml](examples/forwarding_rule_global.yaml): - -```shell - cp templates/forwarding_rule/examples/forwarding_rule_global.yaml \ - my_forwarding_rule.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for -properties, refer to the schema files listed above): - -```shell - vim my_forwarding_rule.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant -deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_forwarding_rule.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Global Forwarding Rule](examples/forwarding_rule_global.yaml) -- [Regional Forwarding Rule](examples/forwarding_rule_regional.yaml) diff --git a/dm/templates/forwarding_rule/examples/forwarding_rule_global.yaml b/dm/templates/forwarding_rule/examples/forwarding_rule_global.yaml deleted file mode 100644 index 0c2d3154989..00000000000 --- a/dm/templates/forwarding_rule/examples/forwarding_rule_global.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Example of the forwarding rule template usage. -# -# In this example, a global forwarding rule is created. 
-# -# Replace the following placeholders with valid values: -# : a URL of a global load balancing -# resource to receive the matched traffic; e.g., targetHttpProxy -# : a range of port numbers traffic from which -# must be forwarded to the target; e.g., '80, 8080' -# : Name of a label -# : Value of a label -# -imports: - - path: templates/forwarding_rule/forwarding_rule.py - name: forwarding_rule.py - -resources: - - name: external-global-forwarding-rule - type: forwarding_rule.py - properties: - loadBalancingScheme: EXTERNAL - target: - portRange: - labels: - : diff --git a/dm/templates/forwarding_rule/examples/forwarding_rule_regional.yaml b/dm/templates/forwarding_rule/examples/forwarding_rule_regional.yaml deleted file mode 100644 index 9f6107c0bfd..00000000000 --- a/dm/templates/forwarding_rule/examples/forwarding_rule_regional.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Example of the forwarding rule template usage. -# -# In this example, a regional forwarding rule is created. -# -# Replace the following placeholders with valid values: -# : a URL of a region where the forwarding rule resides -# : a URL of a backend service residing in -# the same region as the forwarding rule -# : a number of a port traffic from which must be forwarded to the -# backend service; e.g., 80 -# -imports: - - path: templates/forwarding_rule/forwarding_rule.py - name: forwarding_rule.py - -resources: - - name: internal-regional-forwarding-rule - type: forwarding_rule.py - properties: - region: - ports: - - - loadBalancingScheme: INTERNAL - backendService: diff --git a/dm/templates/forwarding_rule/forwarding_rule.py b/dm/templates/forwarding_rule/forwarding_rule.py deleted file mode 100644 index 662a34eaabd..00000000000 --- a/dm/templates/forwarding_rule/forwarding_rule.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a forwarding rule. """ - -REGIONAL_GLOBAL_TYPE_NAMES = { - # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules - True: { - 'GA': 'gcp-types/compute-v1:forwardingRules', - 'Beta': 'gcp-types/compute-beta:forwardingRules' - }, - # https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules - False: { - 'GA': 'gcp-types/compute-v1:globalForwardingRules', - 'Beta': 'gcp-types/compute-beta:globalForwardingRules' - } -} - - -def set_optional_property(destination, source, prop_name): - """ Copies the property value, if present. """ - - if prop_name in source: - destination[prop_name] = source[prop_name] - - -def get_forwarding_rule_outputs(res_name, region): - """ Creates outputs for the forwarding rule. """ - - outputs = [ - { - 'name': 'name', - 'value': '$(ref.{}.name)'.format(res_name) - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(res_name) - }, - { - 'name': 'IPAddress', - 'value': '$(ref.{}.IPAddress)'.format(res_name) - } - ] - - if region: - outputs.append({'name': 'region', 'value': region}) - - return outputs - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - is_regional = 'region' in properties - FW_rule_version = 'Beta' if 'labels' in properties else 'GA' - region = properties.get('region') - rule_properties = { - 'name': name, - 'project': project_id, - } - - resource = { - 'name': context.env['name'], - 'type': REGIONAL_GLOBAL_TYPE_NAMES[is_regional][FW_rule_version], - 'properties': rule_properties - } - - optional_properties = [ - 'description', - 'IPAddress', - 'IPProtocol', - 'portRange', - 'ports', - 'region', - 'target', - 'loadBalancingScheme', - 'subnetwork', - 'network', - 'backendService', - 'ipVersion', - 'serviceLabel', - 'networkTier', - 'allPorts', - 'labels', - ] - - for prop in optional_properties: - set_optional_property(rule_properties, properties, prop) - - outputs = get_forwarding_rule_outputs(context.env['name'], region) - - return {'resources': [resource], 'outputs': outputs} diff --git a/dm/templates/forwarding_rule/forwarding_rule.py.schema b/dm/templates/forwarding_rule/forwarding_rule.py.schema deleted file mode 100644 index c7ee6b3502d..00000000000 --- a/dm/templates/forwarding_rule/forwarding_rule.py.schema +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Forwarding Rule - author: Sourced Group Inc. 
- version: 1.0.0 - description: | - Creates a forwarding rule. - - For more information on this resource: - https://cloud.google.com/load-balancing/docs/forwarding-rules - - APIs endpoints used by this template: - - gcp-types/compute-v1:forwardingRules => - https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules - - gcp-types/compute-v1:globalForwardingRules => - https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules - -additionalProperties: false - -allOf: - - oneOf: - - properties: - loadBalancingScheme: - enum: - - INTERNAL - IPProtocol: - enum: - - TCP - - UDP - - not: - properties: - loadBalancingScheme: - enum: - - INTERNAL - - oneOf: - - allOf: - - required: - - region - - properties: - networkTier: - enum: - - PREMIUM - - STANDARD - - allOf: - - not: - required: - - region - - properties: - networkTier: - enum: - - PREMIUM - - oneOf: - - properties: - loadBalancingScheme: - enum: - - INTERNAL_SELF_MANAGED - IPProtocol: - enum: - - TCP - - not: - properties: - loadBalancingScheme: - enum: - - INTERNAL_SELF_MANAGED - - oneOf: - - allOf: - - properties: - IPProtocol: - enum: - - TCP - - UDP - - SCTP - - required: - - portRange - - not: - required: - - portRange - - oneOf: - - allOf: - - properties: - loadBalancingScheme: - enum: - - INTERNAL - - anyOf: - - required: - - ports - - required: - - backendService - - required: - - subnetwork - - required: - - serviceLabel - - allOf: - - not: - required: - - ports - - not: - required: - - backendService - - not: - required: - - subnetwork - - not: - required: - - serviceLabel - - oneOf: - - allOf: - - properties: - loadBalancingScheme: - enum: - - INTERNAL - - INTERNAL_SELF_MANAGED - - required: - - network - - not: - required: - - network - - oneOf: - - allOf: - - loadBalancingScheme: - enum: - - EXTERNAL - - not: - required: - - region - - required: - - ipVersion - - not: - required: - - ipVersion - -properties: - name: - type: string - description: | - Must comply with 
RFC1035. Specifically, the name must be 1-63 characters long and match - the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, - and all following characters must be a dash, lowercase letter, or digit, except the last character, - which cannot be a dash. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the forwarding rule. The - Google apps domain is prefixed if applicable. - description: - type: string - description: The resource description (optional). - region: - type: string - description: | - The URL of the region where the regional forwarding rule resides. - Not applicable to global forwarding rules. - IPAddress: - type: string - description: | - The IP address that this forwarding rule is serving on behalf of. - - Addresses are restricted based on the forwarding rule's load balancing scheme - (EXTERNAL or INTERNAL) and scope (global or regional). - - When the load balancing scheme is EXTERNAL, for global forwarding rules, the address must be a global IP, - and for regional forwarding rules, the address must live in the same region as the forwarding rule. - If this field is empty, an ephemeral IPv4 address from the same scope (global or regional) will be assigned. - A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6. - - When the load balancing scheme is INTERNAL_SELF_MANAGED, this must be a URL reference to an existing Address - resource ( internal regional static IP address), with a purpose of GCE_END_POINT and addressType of INTERNAL. - - When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the - network/subnet configured for the forwarding rule. By default, if this field is empty, an ephemeral - internal IP address will be automatically allocated from the IP range of the subnet or network - configured for this forwarding rule. 
- - An address can be specified either by a literal IP address or a URL reference to an existing Address resource. - The following examples are all valid: - - 100.1.2.3 - - https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address - - projects/project/regions/region/addresses/address - - regions/region/addresses/address - - global/addresses/address - - address - IPProtocol: - type: string - description: | - The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP. - - When the load balancing scheme is INTERNAL, only TCP and UDP are valid. - When the load balancing scheme is INTERNAL_SELF_MANAGED, only TCPis valid. - enum: - - TCP - - UDP - - ESP - - AH - - SCTP - - ICMP - portRange: - type: [integer,string] - description: | - The port range; only packets addressed to ports in that range are - forwarded to the target. Used in conjunction with the target field - for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, - TargetVpnGateway, TargetPool, and TargetInstance. Applicable only - when IPProtocol is TCP, UDP, or SCTP. - ports: - type: array - uniqItems: true - description: | - This field is used along with the backendService field for internal load balancing. - - When the load balancing scheme is INTERNAL, a list of ports can be configured, for example, - ['80'], ['8000','9000'] etc. Only packets addressed to these ports will be forwarded to the - backends configured with this forwarding rule. - - You may specify a maximum of up to 5 ports. - maxItems: 5 - items: - type: integer - minimum: 1 - maximum: 65535 - target: - type: string - description: | - The URL of the target resource to receive the matched traffic. For regional forwarding rules, - this target must live in the same region as the forwarding rule. For global forwarding rules, this - target must be a global load balancing resource. The forwarded traffic must be of a type appropriate - to the target object. 
For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are valid. - - Authorization requires one or more of the following Google IAM permissions on the specified resource target: - - compute.targetHttpProxies.use - - compute.targetHttpsProxies.use - - compute.targetInstances.use - - compute.targetPools.use - - compute.targetSslProxies.use - - compute.targetTcpProxies.use - - compute.targetVpnGateways.use - loadBalancingScheme: - type: string - description: | - Defines what the forwarding rule is used for. - INTERNAL - used for Internal Network load balancing - (TCP, UDP) - INTERNAL_SELF_MANAGED - used for Internal Global HTTP(S) load balancing - EXTERNAL - used for External load balancing (HTTP(S), External - TCP/UDP, SSL Proxy) - enum: - - INTERNAL - - INTERNAL_SELF_MANAGED - - EXTERNAL - subnetwork: - type: string - description: | - The subnetwork the load-balanced IP must belong to for the forwarding rule. - Used only for INTERNAL load balancing. - serviceLabel: - type: string - description: | - An optional prefix to the service name for this Forwarding Rule. If specified, will be the first label - of the fully qualified service name. - - The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters - long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a - lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except - the last character, which cannot be a dash. - - This field is only used for internal load balancing. - labels: - type: object - description: | - Labels to apply to this instance. These can be later modified by the setLabels method. - - An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
- - networkTier: - type: string - description: | - This signifies the networking tier used for configuring this load balancer and can only - take the following values: PREMIUM , STANDARD. - - For regional ForwardingRule, the valid values are PREMIUM and STANDARD. For GlobalForwardingRule, - the valid value is PREMIUM. - - If this field is not specified, it is assumed to be PREMIUM. If IPAddress is specified, - this value must be equal to the networkTier of the Address. - enum: - - STANDARD - - PREMIUM - network: - type: string - description: | - The network the load-balanced IP must belong to for this forwarding rule. - If no value is provided, the default network is used. Used only for - INTERNAL and INTERNAL_SELF_MANAGED load balancing. - backendService: - type: string - description: | - The backend service URL to receive the matched traffic. - Used only for INTERNAL load balancing. - For example: https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendName} - ipVersion: - type: string - description: | - The IP Version the forwarding rule uses. Can be - specified only for external global forwarding rules. - enum: - - IPV4 - - IPV6 - allPorts: - type: boolean - description: | - This field is used along with the backendService field for internal load balancing or with the target - field for internal TargetInstance. This field cannot be used with port or portRange fields. - - When the load balancing scheme is INTERNAL and protocol is TCP/UDP, specify this field to allow packets - addressed to any ports will be forwarded to the backends configured with this forwarding rule. - -outputs: - region: - type: string - description: | - The URL of the region where the regional forwarding rule resides. - name: - type: string - description: The resource name. - selfLink: - type: string - description: The URI (SelfLink) of the forwarding rule resource. 
- IPAddress: - type: string - description: | - The IP address on behalf of which the forwarding rule serves. - -documentation: - - templates/forwarding_rule/README.md - -examples: - - templates/forwarding_rule/examples/forwarding_rule_regional.yaml - - templates/forwarding_rule/examples/forwarding_rule_global.yaml diff --git a/dm/templates/forwarding_rule/tests/integration/forwarding_rule.bats b/dm/templates/forwarding_rule/tests/integration/forwarding_rule.bats deleted file mode 100755 index 35831702e6a..00000000000 --- a/dm/templates/forwarding_rule/tests/integration/forwarding_rule.bats +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # test specific variables - export REGION="us-central1" - export EXTERNAL_RES_NAME="external-global-fr-${RAND}" - export INTERNAL_RES_NAME="internal-regional-fr-${RAND}" - export PROXY_NAME="http-proxy-${RAND}" - export EXTERNAL_LB_SCHEME="EXTERNAL" - export EXTERNAL_PORT="80" - export INTERNAL_NAME="fr-internal-regional-${RAND}" - export INTERNAL_DESC="Internal description" - export INTERNAL_PORT="80" - export INTERNAL_LB_SCHEME="INTERNAL" - export ZONE="us-central1-f" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/forwarding_rule/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying global external forwarding rule" { - TARGET_PROXY="global/targetHttpProxies/${PROXY_NAME}" - run gcloud compute forwarding-rules describe \ - "${EXTERNAL_RES_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --global - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "portRange: ${EXTERNAL_PORT}-${EXTERNAL_PORT}" ]] - [[ "$output" =~ "loadBalancingScheme: ${EXTERNAL_LB_SCHEME}" ]] - [[ "$output" =~ "$TARGET_PROXY" ]] -} - -@test "Verifying regional internal forwarding rule" { - run gcloud compute forwarding-rules describe "${INTERNAL_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region "${REGION}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "- '${INTERNAL_PORT}'" ]] - [[ "$output" =~ "description: ${INTERNAL_DESC}" ]] - [[ "$output" =~ "name: ${INTERNAL_NAME}" ]] - [[ "$output" =~ "loadBalancingScheme: ${INTERNAL_LB_SCHEME}" ]] - [[ "$output" =~ "regional-internal-backend-service-${RAND}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/forwarding_rule/tests/integration/forwarding_rule.yaml b/dm/templates/forwarding_rule/tests/integration/forwarding_rule.yaml deleted file mode 100644 index 7f4c31e3537..00000000000 --- a/dm/templates/forwarding_rule/tests/integration/forwarding_rule.yaml +++ /dev/null @@ -1,108 +0,0 @@ -# Test of the forwarding rule template. 
-# - -imports: - - path: templates/forwarding_rule/forwarding_rule.py - name: forwarding_rule.py - -resources: - - name: ${EXTERNAL_RES_NAME} - type: forwarding_rule.py - properties: - loadBalancingScheme: ${EXTERNAL_LB_SCHEME} - target: $(ref.${PROXY_NAME}.selfLink) - portRange: ${EXTERNAL_PORT} - labels: - name: testlabelname - owner: megatron - - name: ${INTERNAL_RES_NAME} - type: forwarding_rule.py - properties: - name: ${INTERNAL_NAME} - description: ${INTERNAL_DESC} - region: ${REGION} - ports: - - ${INTERNAL_PORT} - loadBalancingScheme: ${INTERNAL_LB_SCHEME} - backendService: $(ref.regional-internal-backend-service-${RAND}.selfLink) - -# Test prerequisites: backend services, group managers, healthchecks, -# the URL map, instance template, and network. - - name: ${PROXY_NAME} - type: compute.v1.targetHttpProxy - properties: - urlMap: $(ref.url-map-${RAND}.selfLink) - - - name: url-map-${RAND} - type: compute.v1.urlMap - properties: - defaultService: $(ref.global-external-backend-service-${RAND}.selfLink) - - - name: regional-internal-backend-service-${RAND} - type: compute.v1.regionBackendService - properties: - region: ${REGION} - protocol: TCP - loadBalancingScheme: INTERNAL - backends: - - group: $(ref.regional-igm-${RAND}.instanceGroup) - healthChecks: - - $(ref.test-healthcheck-tcp-${RAND}.selfLink) - - - name: regional-igm-${RAND} - type: compute.v1.regionInstanceGroupManager - properties: - instanceTemplate: $(ref.instance-template-${RAND}.selfLink) - region: ${REGION} - targetSize: 1 - - - name: global-external-backend-service-${RAND} - type: compute.v1.backendService - properties: - protocol: HTTP - healthChecks: - - $(ref.test-healthcheck-http-${RAND}.selfLink) - loadBalancingScheme: EXTERNAL - backends: - - group: $(ref.zonal-igm-${RAND}.instanceGroup) - - - name: zonal-igm-${RAND} - type: compute.v1.instanceGroupManager - properties: - instanceTemplate: $(ref.instance-template-${RAND}.selfLink) - zone: ${ZONE} - targetSize: 1 - - - name: 
instance-template-${RAND} - type: compute.v1.instanceTemplate - properties: - properties: - machineType: f1-micro - disks: - - autoDelete: true - boot: true - deviceName: boot - initializeParams: - sourceImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - type: PERSISTENT - networkInterfaces: - - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - network: $(ref.test-network-${RAND}.selfLink) - - - name: test-healthcheck-tcp-${RAND} - type: compute.v1.healthCheck - properties: - type: TCP - tcpHealthCheck: - port: ${INTERNAL_PORT} - proxyHeader: NONE - - - name: test-healthcheck-http-${RAND} - type: compute.v1.httpHealthCheck - - - name: test-network-${RAND} - type: compute.v1.network - properties: - autoCreateSubnetworks: true diff --git a/dm/templates/gcs_bucket/README.md b/dm/templates/gcs_bucket/README.md deleted file mode 100644 index 534d82db95c..00000000000 --- a/dm/templates/gcs_bucket/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Google Cloud Storage Bucket - -This template creates a Google Cloud Storage bucket. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [storage.admin](https://cloud.google.com/storage/docs/access-control/iam-roles) IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [gcp-types/storage-v1:buckets](https://cloud.google.com/storage/docs/creating-buckets) -- gcp-types/storage-v1:virtual.buckets.iamMemberBinding - -### Properties - -See the `properties` section in the schema file(s): - -- [gcs_bucket](gcs_bucket.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - - ```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - ``` - -2. Go to the [dm](../../) directory - - ```shell - cd dm - ``` - -3. 
Copy the example DM config to be used as a model for the deployment, in this case [examples/gcs\_bucket.yaml](examples/gcs_bucket.yaml) - - ```shell - cp templates/gcs_bucket/examples/gcs_bucket.yaml my_gcs_bucket.yaml - ``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - - ```shell - vim my_gcs_bucket.yaml # <== Replace the placeholders in the file - ``` - -5. Create your deployment as described below, replacing - with your with your own deployment name - - ```shell - gcloud deployment-manager deployments create \ - --config my_gcs_bucket.yaml - ``` - -6. In case you need to delete your deployment: - - ```shell - gcloud deployment-manager deployments delete - ``` - -## Examples - -- [Storage Bucket](examples/gcs_bucket.yaml) -- [Storage Bucket with LifeCycle Enabled](examples/gcs_bucket_lifecycle.yaml) -- [Storage Bucket with IAM Bindings](examples/gcs_bucket_iam_bindings.yaml) diff --git a/dm/templates/gcs_bucket/examples/gcs_bucket.yaml b/dm/templates/gcs_bucket/examples/gcs_bucket.yaml deleted file mode 100644 index 6bcaaa5e543..00000000000 --- a/dm/templates/gcs_bucket/examples/gcs_bucket.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Example usage of the Google Cloud Storage bucket template. -# -# In this example, a storage bucket is created in the us-east1 location. -# Cloud storage bucket names must be globally unique. -# For bucket and object naming guidelines, -# refer to https://cloud.google.com/storage/docs/naming. -# -# Replace the placeholder with a globally unique storage -# bucket name. For details, refer to -# https://cloud.google.com/storage/docs/json_api/v1/buckets. 
- -imports: - - path: templates/gcs_bucket/gcs_bucket.py - name: gcs_bucket.py - -resources: - - name: - type: gcs_bucket.py - properties: - name: - location: us-east1 - versioning: - enabled: True - labels: - env: development diff --git a/dm/templates/gcs_bucket/examples/gcs_bucket_iam_bindings.yaml b/dm/templates/gcs_bucket/examples/gcs_bucket_iam_bindings.yaml deleted file mode 100644 index d910601c398..00000000000 --- a/dm/templates/gcs_bucket/examples/gcs_bucket_iam_bindings.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# Example usage of the Google Cloud Storage bucket template. -# -# In this example, a storage bucket is created in the us-east1 location. -# Cloud storage bucket names must be globally unique. -# For bucket and object naming guidelines, -# refer to https://cloud.google.com/storage/docs/naming. -# -# IAM Policies are also applied with the `bindings` section. -# -# Replace the following placeholders with the relevant values: -# : a globally unique storage bucket name -# : a valid user account -# : a valid service account email -# address -# -# For details, refer to -# https://cloud.google.com/storage/docs/json_api/v1/buckets. - -imports: - - path: templates/gcs_bucket/gcs_bucket.py - name: gcs_bucket.py - -resources: - - name: - type: gcs_bucket.py - properties: - name: - location: us-east1 - versioning: - enabled: True - bindings: - - role: roles/storage.objectViewer - members: - - user: - - serviceAccount: - diff --git a/dm/templates/gcs_bucket/examples/gcs_bucket_lifecycle.yaml b/dm/templates/gcs_bucket/examples/gcs_bucket_lifecycle.yaml deleted file mode 100644 index 8b897e3223c..00000000000 --- a/dm/templates/gcs_bucket/examples/gcs_bucket_lifecycle.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Example usage of the Google Cloud Storage bucket template with lifecycle -# and object versioning enabled. -# -# In this example, a storage bucket is created in the us-east1 location. -# Cloud storage bucket names must be globally unique. 
-# For bucket and object naming guidelines, -# refer to https://cloud.google.com/storage/docs/naming. -# -# Replace the placeholder with a globally unique storage -# bucket name. For details, refer to -# https://cloud.google.com/storage/docs/json_api/v1/buckets. - -imports: - - path: templates/gcs_bucket/gcs_bucket.py - name: gcs_bucket.py - -resources: - - name: - type: gcs_bucket.py - properties: - name: - location: us-east1 - versioning: - enabled: True - lifecycle: - rule: - - action: - type: SetStorageClass - storageClass: NEARLINE - condition: - age: 36500 - createdBefore: "2018-08-16" - isLive: false - matchesStorageClass: - - REGIONAL - - STANDARD - - COLDLINE - numNewerVersions: 5 diff --git a/dm/templates/gcs_bucket/gcs_bucket.py b/dm/templates/gcs_bucket/gcs_bucket.py deleted file mode 100644 index d1316ee506d..00000000000 --- a/dm/templates/gcs_bucket/gcs_bucket.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Google Cloud Storage bucket. """ - -from hashlib import sha1 - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - resources = [] - properties = context.properties - project_id = properties.get('project', context.env['project']) - bucket_name = properties.get('name', context.env['name']) - - # output variables - bucket_selflink = '$(ref.{}.selfLink)'.format(context.env['name']) - bucket_uri = 'gs://' + bucket_name + '/' - - bucket = { - 'name': context.env['name'], - # https://cloud.google.com/storage/docs/json_api/v1/buckets - 'type': 'gcp-types/storage-v1:buckets', - 'properties': { - 'project': project_id, - 'name': bucket_name - } - } - - requesterPays = context.properties.get('requesterPays') - if requesterPays is not None: - bucket['properties']['billing'] = {'requesterPays': requesterPays} - - optional_props = [ - 'acl', - 'iamConfiguration', - 'retentionPolicy', - 'encryption', - 'defaultEventBasedHold', - 'cors', - 'defaultObjectAcl', - 'billing', - 'location', - 'versioning', - 'storageClass', - 'predefinedAcl', - 'predefinedDefaultObjectAcl', - 'logging', - 'lifecycle', - 'labels', - 'website' - ] - - for prop in optional_props: - if prop in properties: - bucket['properties'][prop] = properties[prop] - - if not properties.get('iamConfiguration', {}).get('bucketPolicyOnly', {}).get('enabled', False): - if 'predefinedAcl' not in bucket['properties']: - bucket['properties']['predefinedAcl'] = 'private' - if 'predefinedDefaultObjectAcl' not in bucket['properties']: - bucket['properties']['predefinedDefaultObjectAcl'] = 'private' - - resources.append(bucket) - - # If IAM policy bindings are defined, apply these bindings. 
- storage_provider_type = 'gcp-types/storage-v1:virtual.buckets.iamMemberBinding' - bindings = properties.get('bindings', []) - - if 'dependsOn' in properties: - dependson = { 'metadata': { 'dependsOn': properties['dependsOn'] } } - dependson_root = properties['dependsOn'] - else: - dependson = {} - dependson_root = [] - - if bindings: - for role in bindings: - for member in role['members']: - suffix = sha1('{}-{}'.format(role['role'], member).encode('utf-8')).hexdigest()[:10] - policy_get_name = '{}-{}'.format(context.env['name'], suffix) - policy_name = '{}-iampolicy'.format(policy_get_name) - iam_policy_resource = { - 'name': policy_name, - # TODO - Virtual type documentation needed - 'type': (storage_provider_type), - 'properties': - { - 'bucket': '$(ref.{}.name)'.format(context.env['name']), - 'role': role['role'], - 'member': member, - } - } - iam_policy_resource.update(dependson) - resources.append(iam_policy_resource) - dependson = { 'metadata': { 'dependsOn': [policy_name] + dependson_root } } - - if properties.get('billing', {}).get('requesterPays'): - for resource in resources: - resource['properties']['userProject'] = properties.get('userProject', context.env['project']) - - return { - 'resources': - resources, - 'outputs': - [ - { - 'name': 'name', - 'value': bucket_name - }, - { - 'name': 'selfLink', - 'value': bucket_selflink - }, - { - 'name': 'url', - 'value': bucket_uri - } - ] - } diff --git a/dm/templates/gcs_bucket/gcs_bucket.py.schema b/dm/templates/gcs_bucket/gcs_bucket.py.schema deleted file mode 100644 index 3acb1a7f825..00000000000 --- a/dm/templates/gcs_bucket/gcs_bucket.py.schema +++ /dev/null @@ -1,499 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Google Cloud Storage Bucket - author: Sourced Group Inc. - version: 1.1.1 - description: | - Supports creation of a Google Cloud Storage bucket. - - For more information on this resource: - https://cloud.google.com/storage/docs/json_api/. - - APIs endpoints used by this template: - - gcp-types/storage-v1:buckets => - https://cloud.google.com/storage/docs/json_api/v1/buckets - - gcp-types/storage-v1:virtual.buckets.iamMemberBinding => - TODO - Virtual type documentation needed - -additionalProperties: false - -allOf: - - oneOf: - - properties: - iamConfiguration: - properties: - bucketPolicyOnly: - properties: - enabled: - enum: [False] - - allOf: - - not: - required: - - acl - - not: - required: - - defaultObjectAcl - - required: - - iamConfiguration - properties: - iamConfiguration: - properties: - bucketPolicyOnly: - properties: - enabled: - enum: [True] - - oneOf: - - allOf: - - not: - required: - - userProject - - properties: - billing: - properties: - requesterPays: - enum: [False] - - allOf: - - required: - - billing - - properties: - billing: - properties: - requesterPays: - enum: [True] - -properties: - name: - type: string - description: The name of the bucket. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the bucket. - userProject: - type: string - description: | - The project to be billed for this request. Current project is used if not set. - location: - type: string - default: us-east1 - description: The region name where the bucket is deployed. 
- defaultEventBasedHold: - type: boolean - description: Whether or not to automatically apply an eventBasedHold to new objects added to the bucket. - storageClass: - type: string - default: STANDARD - description: | - The bucket's default storage class. Defines how objects - in the bucket are stored; determines the SLA and the - cost of storage. - enum: - - REGIONAL - - MULTI_REGIONAL - - STANDARD - - NEARLINE - - COLDLINE - - DURABLE_REDUCED_AVAILABILITY - versioning: - type: object - additionalProperties: false - description: Enables/disables object versioning. - required: - - enabled - properties: - enabled: - type: boolean - description: Enables/disables object versioning. - predefinedAcl: - type: string - description: | - The predefined or "canned" ACL - an alias for a set of specific - ACL entries that you can use to quickly apply multiple ACL entries - to a bucket or object in a single operation. - Ref: https://cloud.google.com/storage/docs/access-control/lists. - enum: - - authenticatedRead - - private - - projectPrivate - - publicRead - - publicReadWrite - predefinedDefaultObjectAcl: - type: string - enum: - - authenticatedRead - - bucketOwnerFullControl - - bucketOwnerRead - - private - - projectPrivate - - publicRead - description: | - The predefined or "canned" ACL for the default object in the bucket - - an alias for a set of specific ACL entries that you can use to quickly - apply multiple ACL entries to a bucket or object in a single operation. - Ref: https://cloud.google.com/storage/docs/access-control/lists. - encryption: - type: object - additionalProperties: false - description: | - Encryption configuration for a bucket. - required: - - defaultKmsKeyName - properties: - defaultKmsKeyName: - type: string - description: | - A Cloud KMS key that will be used to encrypt objects inserted into this bucket, - if no encryption method is specified. 
- retentionPolicy: - type: object - additionalProperties: false - description: | - The bucket's retention policy, which defines the minimum age an object in the bucket - must have to be deleted or overwritten. - required: - - retentionPeriod - properties: - retentionPeriod: - type: integer - maximum: 3155760000 - exclusiveMaximum: True - description: | - The period of time, in seconds, that objects in the bucket must be retained and cannot be deleted, - overwritten, or archived. The value must be less than 3,155,760,000 seconds. - iamConfiguration: - type: object - additionalProperties: false - description: | - The bucket's IAM configuration. - required: - - bucketPolicyOnly - properties: - bucketPolicyOnly: - type: object - additionalProperties: false - description: | - The bucket's Bucket Policy Only configuration. - required: - - enabled - properties: - enabled: - type: boolean - default: False - description: | - Whether or not the bucket uses Bucket Policy Only. - If set, access checks only use bucket-level IAM policies or above. - billing: - type: object - additionalProperties: false - description: | - The bucket's billing configuration. - required: - - requesterPays - properties: - requesterPays: - type: boolean - default: False - description: | - When set to true, Requester Pays is enabled for this bucket. - logging: - type: object - additionalProperties: false - required: - - logBucket - properties: - logBucket: - type: string - description: | - The destination bucket where the current bucket's logs - must be placed. - logObjectPrefix: - type: string - description: The prefix for log object names. - cors: - type: array - uniqueItems: true - description: | - The bucket's Cross-Origin Resource Sharing (CORS) configuration. 
- items: - type: object - additionalProperties: false - required: - - method - - origin - properties: - maxAgeSeconds: - type: integer - description: | - The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses. - method: - type: array - uniqueItems: true - description: | - The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) - Note: "*" is permitted in the list of methods, and means "any method". - items: - type: string - origin: - type: array - uniqueItems: true - description: | - The list of Origins eligible to receive CORS response headers. - Note: "*" is permitted in the list of origins, and means "any Origin". - items: - type: string - responseHeader: - type: array - uniqueItems: true - description: | - The list of HTTP headers other than the simple response headers to give permission - for the user-agent to share across domains. - items: - type: string - defaultObjectAcl: - type: array - uniqueItems: true - description: | - Default access controls to apply to new objects when no ACL is provided. - This list defines an entity and role for one or more defaultObjectAccessControls Resources. - items: - type: object - additionalProperties: false - required: - - role - - entity - properties: - role: - type: string - description: | - The access permission for the entity. - - Acceptable values are: - "OWNER" - "READER" - enum: - - OWNER - - READER - entity: - type: string - description: | - The entity holding the permission, in one of the following forms: - - user-userId - - user-email - - group-groupId - - group-email - - domain-domain - - project-team-projectId - - allUsers - - allAuthenticatedUsers - - Examples: - - The user liz@example.com would be user-liz@example.com. - - The group example@googlegroups.com would be group-example@googlegroups.com. - - To refer to all members of the G Suite for Business domain example.com, the entity would be domain-example.com. 
- acl: - type: array - uniqueItems: true - description: | - Access controls on the bucket, containing one or more bucketAccessControls Resources. - items: - type: object - additionalProperties: false - required: - - role - - entity - properties: - role: - type: string - description: | - The access permission for the entity. - enum: - - OWNER - - READER - - WRITER - entity: - type: string - description: | - The entity holding the permission, in one of the following forms: - - user-userId - - user-email - - group-groupId - - group-email - - domain-domain - - project-team-projectId - - allUsers - - allAuthenticatedUsers - - Examples: - - The user liz@example.com would be user-liz@example.com. - - The group example@googlegroups.com would be group-example@googlegroups.com. - - To refer to all members of the G Suite for Business domain example.com, the entity would be domain-example.com. - bindings: - type: array - uniqueItems: true - description: IAM bindings for the bucket. - items: - type: object - additionalProperties: false - required: - - role - - members - properties: - role: - type: string - pattern: ^roles\/storage\. - description: The role to assign to members. - members: - type: array - uniqueItems: true - items: - type: string - description: | - A collection of identifiers for members who may assume the provided role. - Recognized identifiers are as follows: - allUsers — A special identifier that represents anyone on the internet; with or without a Google account. - allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with - a Google account or a service account. - user:emailid — An email address that represents a specific account. For example, - user:alice@gmail.com or user:joe@example.com. - serviceAccount:emailid — An email address that represents a service account. For example, - serviceAccount:my-other-app@appspot.gserviceaccount.com . - group:emailid — An email address that represents a Google group. 
For example, group:admins@example.com. - domain:domain — A G Suite domain name that represents all the users of that domain. - For example, domain:google.com or domain:example.com. - projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project - projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project - projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project - lifecycle: - type: object - additionalProperties: false - description: The storage object's lifecycle actions and conditions. - properties: - rule: - type: array - uniqueItems: true - description: The lifecycle action and condition. - items: - type: object - additionalProperties: false - required: - - action - - condition - properties: - action: - type: object - additionalProperties: false - description: The action to be taken if the condition is met. - required: - - type - properties: - storageClass: - type: string - description: - The storage class to switch on if the condition is met. - enum: - - NEARLINE - - COLDLINE - type: - type: string - description: The action type - setStorageClass or Delete. - enum: - - SetStorageClass - - Delete - condition: - type: object - additionalProperties: false - description: The lifecycle condition. - properties: - age: - type: number - description: | - The object age. Selects all objects of this age or older. - createdBefore: - type: string - description: | - The date part of a date in the RFC 3339 format. - For example, "2013-01-15". - matchesStorageClass: - type: array - uniqueItems: true - description: | - All objects with any of the selected storage classes. - items: - type: string - enum: - - MULTI_REGIONAL - - REGIONAL - - STANDARD - - DURABLE_REDUCED_AVAILABILITY - - NEARLINE - - COLDLINE - isLive: - type: boolean - description: | - Defines whether the object is live. Applies only to - versioned objects. 
- numNewerVersions: - type: number - description: | - The number of newer versions. Selects all objects with - at least that many newer versions. Applies only to - versioned objects. - labels: - type: object - description: User-provided labels in key/value pairs. - requesterPays: - type: boolean - description: | - When set to true, Requester Pays is enabled for this bucket. - website: - type: object - additionalProperties: false - description: | - The bucket's website configuration, controlling how the service behaves - when accessing the bucket contents as a web site. - properties: - mainPageSuffix: - type: string - description: | - The suffix that allows creation of index.html objects to represent - directory pages. If the requested object path is missing, the service - ensures that the trailing '/' is present, appends this suffix, and - attempt to retrieve the resulting object. - notFoundPage: - type: string - description: | - The named object from the bucket that the service returns as the - content for the 404 Not Found result if the requested object path - is missing, and no mainPageSuffix object is provided. -outputs: - name: - type: string - description: The name of the storage bucket resource. - selfLink: - type: string - description: The URI (SelfLink) of the storage bucket resource. - url: - type: string - description: | - The base URL of the bucket in the gs:// format. 
- -documentation: - - templates/gcs_bucket/README.md - -examples: - - templates/gcs_bucket/examples/gcs_bucket.yaml - - templates/gcs_bucket/examples/gcs_bucket_iam_bindings.yaml - - templates/gcs_bucket/examples/gcs_bucket_lifecycle.yaml diff --git a/dm/templates/gcs_bucket/tests/integration/gcs_35_iam_bindings.bats b/dm/templates/gcs_bucket/tests/integration/gcs_35_iam_bindings.bats deleted file mode 100644 index 40951b45faa..00000000000 --- a/dm/templates/gcs_bucket/tests/integration/gcs_35_iam_bindings.bats +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -export TEST_SERVICE_ACCOUNT="test-gcs-iam-sa-${RAND}" - -########## HELPER FUNCTIONS ########## - - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables: - export BUCKET_NAME="test-bucket-${RAND}" - export ROLE="roles/storage.objectViewer" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. 
-} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verify if Storage Bucket ${BUCKET_NAME} is created " { - res=$(gsutil ls | grep "${BUCKET_NAME}") - [[ "$status" -eq 0 ]] - [[ "$res" =~ "gs://${BUCKET_NAME}/" ]] -} - -@test "Verify if SAs have role ${ROLE}" { - role=$(gsutil iam get "gs://${BUCKET_NAME}/" | grep role) - [[ "$status" -eq 0 ]] - [[ "$role" =~ "${ROLE}" ]] -} - -@test "Verify if SAs are the members of this bucket" { - run gsutil iam get "gs://${BUCKET_NAME}/" | grep serviceAccount:${TEST_SERVICE_ACCOUNT}-.*@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - [[ "$status" -eq 0 ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] - - run gsutil ls - [[ "$status" -eq 0 ]] - [[ ! 
"$output" =~ "gs://${BUCKET_NAME}/" ]] -} diff --git a/dm/templates/gcs_bucket/tests/integration/gcs_35_iam_bindings.yaml b/dm/templates/gcs_bucket/tests/integration/gcs_35_iam_bindings.yaml deleted file mode 100644 index 0c16d1a5e77..00000000000 --- a/dm/templates/gcs_bucket/tests/integration/gcs_35_iam_bindings.yaml +++ /dev/null @@ -1,268 +0,0 @@ -# Variables (declared in the gcs_bucket.bats file): -# RAND: a random string used by the testing suite -# BUCKET_NAME: a globally unique Cloud Storage bucket name -# ROLE: a role to be assigned to member(s); e.g., roles/storage.objectViewer -# TEST_SERVICE_ACCOUNT: Service account name -# CLOUD_FOUNDATION_PROJECT_ID: Project ID - -imports: - - path: templates/gcs_bucket/gcs_bucket.py - name: gcs_bucket.py - -resources: - - name: ${BUCKET_NAME} - type: gcs_bucket.py - properties: - name: ${BUCKET_NAME} - location: us-east1 - versioning: - enabled: True - bindings: - - role: ${ROLE} - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}-1@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-2@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-3@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-4@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-5@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-6@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-7@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-8@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-9@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-10@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - 
serviceAccount:${TEST_SERVICE_ACCOUNT}-11@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-12@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-13@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-14@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-15@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-16@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-17@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-18@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-19@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-20@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-21@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-22@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-23@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-24@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-25@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-26@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-27@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-28@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-29@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - 
serviceAccount:${TEST_SERVICE_ACCOUNT}-30@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-31@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-32@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-33@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-34@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-35@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - - name: ${TEST_SERVICE_ACCOUNT}-1-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-1 - displayName: ${TEST_SERVICE_ACCOUNT}-1 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-2-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-2 - displayName: ${TEST_SERVICE_ACCOUNT}-2 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-3-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-3 - displayName: ${TEST_SERVICE_ACCOUNT}-3 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-4-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-4 - displayName: ${TEST_SERVICE_ACCOUNT}-4 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-5-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-5 - displayName: ${TEST_SERVICE_ACCOUNT}-5 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-6-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-6 - displayName: ${TEST_SERVICE_ACCOUNT}-6 - projectId: 
${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-7-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-7 - displayName: ${TEST_SERVICE_ACCOUNT}-7 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-8-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-8 - displayName: ${TEST_SERVICE_ACCOUNT}-8 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-9-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-9 - displayName: ${TEST_SERVICE_ACCOUNT}-9 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-10-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-10 - displayName: ${TEST_SERVICE_ACCOUNT}-10 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-11-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-11 - displayName: ${TEST_SERVICE_ACCOUNT}-11 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-12-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-12 - displayName: ${TEST_SERVICE_ACCOUNT}-12 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-13-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-13 - displayName: ${TEST_SERVICE_ACCOUNT}-13 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-14-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-14 - displayName: ${TEST_SERVICE_ACCOUNT}-14 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-15-${CLOUD_FOUNDATION_PROJECT_ID} - 
type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-15 - displayName: ${TEST_SERVICE_ACCOUNT}-15 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-16-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-16 - displayName: ${TEST_SERVICE_ACCOUNT}-16 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-17-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-17 - displayName: ${TEST_SERVICE_ACCOUNT}-17 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-18-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-18 - displayName: ${TEST_SERVICE_ACCOUNT}-18 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-19-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-19 - displayName: ${TEST_SERVICE_ACCOUNT}-19 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-20-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-20 - displayName: ${TEST_SERVICE_ACCOUNT}-20 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-21-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-21 - displayName: ${TEST_SERVICE_ACCOUNT}-21 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-22-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-22 - displayName: ${TEST_SERVICE_ACCOUNT}-22 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-23-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-23 - 
displayName: ${TEST_SERVICE_ACCOUNT}-23 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-24-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-24 - displayName: ${TEST_SERVICE_ACCOUNT}-24 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-25-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-25 - displayName: ${TEST_SERVICE_ACCOUNT}-25 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-26-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-26 - displayName: ${TEST_SERVICE_ACCOUNT}-26 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-27-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-27 - displayName: ${TEST_SERVICE_ACCOUNT}-27 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-28-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-28 - displayName: ${TEST_SERVICE_ACCOUNT}-28 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-29-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-29 - displayName: ${TEST_SERVICE_ACCOUNT}-29 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-30-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-30 - displayName: ${TEST_SERVICE_ACCOUNT}-310 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-31-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-31 - displayName: ${TEST_SERVICE_ACCOUNT}-31 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: 
${TEST_SERVICE_ACCOUNT}-32-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-32 - displayName: ${TEST_SERVICE_ACCOUNT}-32 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-33-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-33 - displayName: ${TEST_SERVICE_ACCOUNT}-33 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-34-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-34 - displayName: ${TEST_SERVICE_ACCOUNT}-34 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-35-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-35 - displayName: ${TEST_SERVICE_ACCOUNT}-35 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} diff --git a/dm/templates/gcs_bucket/tests/integration/gcs_bucket.bats b/dm/templates/gcs_bucket/tests/integration/gcs_bucket.bats deleted file mode 100644 index b2a9467556b..00000000000 --- a/dm/templates/gcs_bucket/tests/integration/gcs_bucket.bats +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables: - export BUCKET_NAME="test-bucket-${RAND}" - export SA_NAME="${BUCKET_NAME}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" - export SA_FQDN="serviceAccount:${SA_NAME}" - export ROLE="roles/storage.objectViewer" - export LIFECYCLE_ACTION_TYPE="SetStorageClass" - export LIFECYCLE_STORAGE_CLASS="NEARLINE" - export LIFECYCLE_AGE_DAYS="36500" - export LIFECYCLE_OBJ_CREATED_BEFORE="2018-01-01" - export LIFECYCLE_NUM_NEWERVERSION="5" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - # create service accounts to test IAM bindings - gcloud iam service-accounts create "${BUCKET_NAME}" \ - --display-name "Test Service Account" - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - # delete service account after tests are complete. - gcloud --quiet iam service-accounts delete "${SA_NAME}" - fi - - # Per-test teardown steps. 
-} - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verify if Storage Bucket ${BUCKET_NAME} is created " { - res=$(gsutil ls | grep "${BUCKET_NAME}") - [[ "$status" -eq 0 ]] - [[ "$res" =~ "gs://${BUCKET_NAME}/" ]] -} - -@test "storageClass on ${BUCKET_NAME} is set to STANDARD " { - run gsutil defstorageclass get "gs://${BUCKET_NAME}/" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "STANDARD" ]] -} - -@test "Versioning on ${BUCKET_NAME} is ENABLED " { - run gsutil versioning get "gs://${BUCKET_NAME}/" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "Enabled" ]] -} - -@test "Logging configuration on ${BUCKET_NAME} is not set " { - run gsutil logging get "gs://${BUCKET_NAME}/" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "has no logging configuration" ]] -} - -@test "Verify if SA ${SA_NAME} has role ${ROLE}" { - role=$(gsutil iam get "gs://${BUCKET_NAME}/" | grep role) - [[ "$status" -eq 0 ]] - [[ "$role" =~ "${ROLE}" ]] -} - -@test "Verify if SA ${SA_NAME} is a member of this bucket" { - member=$(gsutil iam get "gs://${BUCKET_NAME}/" | grep serviceAccount) - [[ "$status" -eq 0 ]] - [[ "$member" =~ "${SA_NAME}" ]] -} - -@test "lifeCycle configuration is set on ${BUCKET_NAME}" { - run gsutil lifecycle get "gs://${BUCKET_NAME}/" - [[ "$status" -eq 0 ]] - [[ ! 
"$output" =~ "has no lifecycle configuration" ]] -} - -@test "lifeCycle Action Type is ${LIFECYCLE_ACTION_TYPE}" { - lc_type=$(gsutil lifecycle get "gs://${BUCKET_NAME}/" | \ - grep ${LIFECYCLE_ACTION_TYPE}) - [[ "$status" -eq 0 ]] - [[ "$lc_type" =~ "${LIFECYCLE_ACTION_TYPE}" ]] -} - -@test "lifeCycle StorageClass is set to ${LIFECYCLE_STORAGE_CLASS}" { - lc_class=$(gsutil lifecycle get "gs://${BUCKET_NAME}/" | \ - grep ${LIFECYCLE_STORAGE_CLASS}) - [[ "$status" -eq 0 ]] - [[ "$lc_class" =~ "${LIFECYCLE_STORAGE_CLASS}" ]] -} - -@test "lifeCycle Condition has AGE set to ${LIFECYCLE_AGE_DAYS}" { - lc_age=$(gsutil lifecycle get "gs://${BUCKET_NAME}/" | \ - grep age) - [[ "$status" -eq 0 ]] - [[ "$lc_age" =~ "${LIFECYCLE_AGE_DAYS}" ]] -} - -@test "lifeCycle Objects CreatedBefore Date is ${LIFECYCLE_OBJ_CREATED_BEFORE}" { - lc_date=$(gsutil lifecycle get "gs://${BUCKET_NAME}/" | \ - grep createdBefore) - [[ "$status" -eq 0 ]] - [[ "$lc_date" =~ "${LIFECYCLE_OBJ_CREATED_BEFORE}" ]] -} - -@test "lifeCycle numNewerVersions is ${LIFECYCLE_NUM_NEWERVERSION}" { - lc_ver=$(gsutil lifecycle get "gs://${BUCKET_NAME}/" | \ - grep numNewerVersions) - [[ "$status" -eq 0 ]] - [[ "$lc_ver" =~ "${LIFECYCLE_NUM_NEWERVERSION}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] - - run gsutil ls - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "gs://${BUCKET_NAME}/" ]] -} diff --git a/dm/templates/gcs_bucket/tests/integration/gcs_bucket.yaml b/dm/templates/gcs_bucket/tests/integration/gcs_bucket.yaml deleted file mode 100644 index 9463cd97b3c..00000000000 --- a/dm/templates/gcs_bucket/tests/integration/gcs_bucket.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Test of the Cloud Storage bucket template. 
-# -# Variables (declared in the gcs_bucket.bats file): -# RAND: a random string used by the testing suite -# BUCKET_NAME: a globally unique Cloud Storage bucket name -# ROLE: a role to be assigned to member(s); e.g., roles/storage.objectViewer -# SA_FQDN: a ServiceAccount FQDN serviceAccount: - -imports: - - path: templates/gcs_bucket/gcs_bucket.py - name: gcs_bucket.py - -resources: - - name: ${BUCKET_NAME} - type: gcs_bucket.py - properties: - name: ${BUCKET_NAME} - location: us-east1 - versioning: - enabled: True - bindings: - - role: ${ROLE} - members: - - ${SA_FQDN} - lifecycle: - rule: - - action: - type: ${LIFECYCLE_ACTION_TYPE} - storageClass: ${LIFECYCLE_STORAGE_CLASS} - condition: - age: ${LIFECYCLE_AGE_DAYS} - createdBefore: ${LIFECYCLE_OBJ_CREATED_BEFORE} - isLive: false - matchesStorageClass: - - REGIONAL - - STANDARD - - COLDLINE - numNewerVersions: ${LIFECYCLE_NUM_NEWERVERSION} diff --git a/dm/templates/gcs_bucket/tests/integration/gcs_iam_bucket_policy_only.bats b/dm/templates/gcs_bucket/tests/integration/gcs_iam_bucket_policy_only.bats deleted file mode 100644 index 6ad95321f44..00000000000 --- a/dm/templates/gcs_bucket/tests/integration/gcs_iam_bucket_policy_only.bats +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="dm-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables: - export BUCKET_NAME="test-bucket-${RAND}" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "BucketPolicyOnly setting on ${BUCKET_NAME} is ENABLED " { - run gsutil bucketpolicyonly get "gs://${BUCKET_NAME}/" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "Enabled: True" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] - - run gsutil ls - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "gs://${BUCKET_NAME}/" ]] -} diff --git a/dm/templates/gcs_bucket/tests/integration/gcs_iam_bucket_policy_only.yaml b/dm/templates/gcs_bucket/tests/integration/gcs_iam_bucket_policy_only.yaml deleted file mode 100644 index b9cbdaf44ed..00000000000 --- a/dm/templates/gcs_bucket/tests/integration/gcs_iam_bucket_policy_only.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Test of the Cloud Storage bucket template with bucketPolicyOnly option. 
-# -# Variables (declared in the gcs_bucket.bats file): -# BUCKET_NAME: a globally unique Cloud Storage bucket name - -imports: - - path: templates/gcs_bucket/gcs_bucket.py - name: gcs_bucket.py - -resources: - - name: ${BUCKET_NAME} - type: gcs_bucket.py - properties: - name: ${BUCKET_NAME} - location: us-east1 - iamConfiguration: - bucketPolicyOnly: - enabled: True diff --git a/dm/templates/gke/README.md b/dm/templates/gke/README.md deleted file mode 100644 index e078c4a5ba2..00000000000 --- a/dm/templates/gke/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# Google Kubernetes Engine (GKE) - -This template creates a Google Kubernetes Engine cluster. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [network and subnetwork](../network/README.md) -- Grant the [container.admin](https://cloud.google.com/kubernetes-engine/docs/how-to/iam) IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [container-v1beta1:projects.locations.clusters](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters) -- [container.v1.cluster](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters) - -### Properties - -See the `properties` section in the schema file(s): - -- [GKE cluster schema](gke.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - - ```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - ``` - -2. Go to the [dm](../../) directory - - ```shell - cd dm - ``` - -3. Copy the example DM config to be used as a model for the deployment, in this case [examples/gke.yaml](examples/gke.yaml) - - ```shell - cp templates/gke/examples/gke_zonal.yaml my_gke_zonal.yaml - ``` - -4. 
Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - - ```shell - vim my_gke_zonal.yaml # <== change values to match your GCP setup - ``` - -5. Create your deployment as described below, replacing - with your own deployment name - - ```shell - gcloud deployment-manager deployments create \ - --config my_gke_zonal.yaml - ``` - -6. In case you need to delete your deployment: - - ```shell - gcloud deployment-manager deployments delete - ``` - -## Examples - -- [GKE Zonal Cluster](examples/gke_zonal.yaml) -- [GKE Regional Cluster](examples/gke_regional.yaml) -- [GKE Private Regional Cluster](examples/gke_regional_private.yaml) diff --git a/dm/templates/gke/examples/gke_regional.yaml b/dm/templates/gke/examples/gke_regional.yaml deleted file mode 100644 index b101eb60c40..00000000000 --- a/dm/templates/gke/examples/gke_regional.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Example of the GKE template usage. -# -# In this example, a GKE regional cluster is created. 
-# Replace the following with valid values -# : A valid VPC network name -# : A valid VPC subnet name - -imports: - - path: templates/gke/gke.py - name: gke.py - -resources: - - name: myk8sregional - type: gke.py - properties: - region: us-east1 - cluster: - name: myk8sregional - description: my awesome k8s cluster - network: - subnetwork: - nodePools: - - name: default - config: - oauthScopes: - - https://www.googleapis.com/auth/compute - - https://www.googleapis.com/auth/devstorage.read_only - - https://www.googleapis.com/auth/logging.write - - https://www.googleapis.com/auth/monitoring - locations: - - us-east1-c - - us-east1-b diff --git a/dm/templates/gke/examples/gke_regional_private.yaml b/dm/templates/gke/examples/gke_regional_private.yaml deleted file mode 100644 index 8394df52095..00000000000 --- a/dm/templates/gke/examples/gke_regional_private.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# Example of the GKE template usage. -# -# In this example, a GKE private regional cluster is created. -# Optional properties are also set in this example. -# -# Replace the following with valid values: -# : a valid VPC network name. -# : a valid VPC subnet name. -# Replace credentials, for accessing the master endpoint, with the valid values: -# : user name for masterAuth. 
-# : password (at least 16 chars) for masterAuth - -imports: - - path: templates/gke/gke.py - name: gke.py - -resources: - - name: myk8sregional - type: gke.py - properties: - region: us-east1 - cluster: - name: myk8sregional - description: my awesome k8s cluster - network: - subnetwork: - nodePools: - - name: default - initialNodeCount: 1 - autoscaling: - enabled: True - minNodeCount: 1 - maxNodeCount: 2 - management: - autoUpgrade: True - autoRepair: True - config: - localSsdCount: 1 - oauthScopes: - - https://www.googleapis.com/auth/compute - - https://www.googleapis.com/auth/devstorage.read_only - - https://www.googleapis.com/auth/logging.write - - https://www.googleapis.com/auth/monitoring - taints: - - key: mykey1 - value: value1 - effect: NO_SCHEDULE - - key: mykey2 - value: value2 - effect: NO_EXECUTE - locations: - - us-east1-c - - us-east1-b - masterAuth: - username: - password: - loggingService: logging.googleapis.com - monitoringService: monitoring.googleapis.com - privateClusterConfig: - enablePrivateNodes: True - masterIpv4CidrBlock: 172.16.0.0/28 - clusterIpv4Cidr: 10.0.0.0/11 - ipAllocationPolicy: - useIpAliases: True - servicesIpv4CidrBlock: 10.96.0.0/18 diff --git a/dm/templates/gke/examples/gke_zonal.yaml b/dm/templates/gke/examples/gke_zonal.yaml deleted file mode 100644 index ab5e60c11af..00000000000 --- a/dm/templates/gke/examples/gke_zonal.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Example of the GKE template usage. -# -# In this example, a GKE zonal cluster is created. 
-# -# Replace the following with valid values -# : A valid VPC network name -# : A valid VPC subnet name - - -imports: - - path: templates/gke/gke.py - name: gke.py - -resources: - - name: myk8s - type: gke.py - properties: - zone: us-east1-b - cluster: - name: myk8s - description: my awesome k8s cluster - network: - subnetwork: - nodePools: - - name: default - config: - oauthScopes: - - https://www.googleapis.com/auth/compute - - https://www.googleapis.com/auth/devstorage.read_only - - https://www.googleapis.com/auth/logging.write - - https://www.googleapis.com/auth/monitoring diff --git a/dm/templates/gke/gke.py b/dm/templates/gke/gke.py deleted file mode 100644 index 303267b5f33..00000000000 --- a/dm/templates/gke/gke.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Google Kubernetes Engine cluster. """ - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - resources = [] - outputs = [] - properties = context.properties - name = properties['cluster'].get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - propc = properties['cluster'] - gke_cluster = { - 'name': context.env['name'], - 'type': '', - 'properties': - { - 'parent': 'projects/{}/locations/{}'.format( - project_id, - properties.get('zone', properties.get('location', properties.get('region'))) - ), - 'cluster': - { - 'name': name, - } - } - } - - if properties.get('zone'): - # https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.zones.clusters - gke_cluster['type'] = 'gcp-types/container-v1beta1:projects.zones.clusters' - # TODO: remove, this is a bug - gke_cluster['properties']['zone'] = properties.get('zone') - else: - # https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters - gke_cluster['type'] = 'gcp-types/container-v1beta1:projects.locations.clusters' - - req_props = ['network', 'subnetwork'] - - optional_props = [ - 'initialNodeCount', - 'initialClusterVersion', - 'description', - 'nodeConfig', - 'nodePools', - 'privateClusterConfig', - 'binaryAuthorization', - 'binaryAuthorization', - 'networkConfig', - 'defaultMaxPodsConstraint', - 'resourceUsageExportConfig', - 'authenticatorGroupsConfig', - 'verticalPodAutoscaling', - 'tierSettings', - 'enableTpu', - 'databaseEncryption', - 'workloadIdentityConfig', - 'masterAuth', - 'loggingService', - 'monitoringService', - 'clusterIpv4Cidr', - 'addonsConfig', - 'locations', - 'enableKubernetesAlpha', - 'resourceLabels', - 'labelFingerprint', - 'legacyAbac', - 'networkPolicy', - 'ipAllocationPolicy', - 'masterAuthorizedNetworksConfig', - 'maintenancePolicy', - 'podSecurityPolicyConfig', - 'privateCluster', - 'masterIpv4CidrBlock', - 'releaseChannel' - ] - - cluster_props = gke_cluster['properties']['cluster'] - - for prop in req_props: - cluster_props[prop] = propc.get(prop) - if prop not in 
propc: - raise KeyError( - "{} is a required cluster property for a {} Cluster." - .format(prop, - cluster_type) - ) - - for oprop in optional_props: - if oprop in propc: - cluster_props[oprop] = propc[oprop] - - resources.append(gke_cluster) - - # Output variables - output_props = [ - 'selfLink', - 'endpoint', - 'instanceGroupUrls', - 'clusterCaCertificate', - 'currentMasterVersion', - 'currentNodeVersion', - 'servicesIpv4Cidr' - ] - - if ( - # https://github.com/GoogleCloudPlatform/deploymentmanager-samples/issues/463 - propc.get('enableDefaultAuthOutput', False) and ( - propc.get('masterAuth', {}).get('clientCertificateConfig', False) - ) - ): - output_props.append('clientCertificate') - output_props.append('clientKey') - - for outprop in output_props: - output_obj = {} - output_obj['name'] = outprop - ma_props = ['clusterCaCertificate', 'clientCertificate', 'clientKey'] - if outprop in ma_props: - output_obj['value'] = '$(ref.' + context.env['name'] + \ - '.masterAuth.' + outprop + ')' - elif outprop == 'instanceGroupUrls': - output_obj['value'] = '$(ref.' + context.env['name'] + \ - '.nodePools[0].' + outprop + ')' - else: - output_obj['value'] = '$(ref.' + context.env['name'] + '.' + outprop + ')' - - outputs.append(output_obj) - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/gke/gke.py.schema b/dm/templates/gke/gke.py.schema deleted file mode 100644 index 5868f788772..00000000000 --- a/dm/templates/gke/gke.py.schema +++ /dev/null @@ -1,1146 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Google Kubernetes Engine (GKE) - author: Sourced Group Inc. - version: 1.1.3 - description: | - Schema for deploying a GKE cluster. - - For more information on this resource: - https://cloud.google.com/kubernetes-engine/docs - - APIs endpoints used by this template: - - gcp-types/container-v1beta1:projects.locations.clusters => - https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters - - gcp-types/container-v1beta1:projects.zones.clusters => - https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.zones.clusters - -additionalProperties: false - -required: - - cluster - -definitions: - locations: - type: array - uniqueItems: True - description: | - The list of the Google Compute Engine locations in which the cluster's - nodes should be located. - items: - type: string - initialNodeCount: - type: number - description: | - The number of nodes to create in this cluster. You must ensure that - your Compute Engine resource quota is sufficient for this number of - instances. You must also have available firewall and routes quota. - minimum: 1 - nodeConfig: - type: object - additionalProperties: false - description: Parameters used in creating the cluster's nodes. - required: - - oauthScopes - properties: - machineType: - type: string - description: | - The name of a Google Compute Engine machine type (e.g. n1-standard-1). - - If unspecified, the default machine type is n1-standard-1. 
- diskSizeGb: - type: number - minimum: 10 - description: | - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. - - If unspecified, the default disk size is 100GB. - imageType: - type: string - default: COS - description: | - The image type to use for this node. Note that for a given image type, the latest version of it will be used. - enum: - - cos # backwards compatible spelling - - Ubuntu # backwards compatible spelling - - COS - - UBUNTU - - COS_CONTAINERD - - UBUNTU_CONTAINERD - oauthScopes: - type: array - uniqueItems: True - description: | - The set of Google API scopes to be made available on all of the node VMs under the "default" service account. - - The following scopes are recommended, but not required, and by default are not included: - - https://www.googleapis.com/auth/compute is required for mounting persistent storage on your nodes. - https://www.googleapis.com/auth/devstorage.read_only is required for communicating with gcr.io. - If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring are enabled, - in which case their required scopes will be added. - items: - type: string - serviceAccount: - type: string - description: | - The Google Cloud Platform Service Account to be used by the node VMs. - If no Service Account is specified, the "default" service account is used. - metadata: - type: object - pattern: "[a-zA-Z0-9-_]+" - description: | - The metadata key/value pairs assigned to instances in the cluster. - - Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes in length. - These are reflected as part of a URL in the metadata server. 
Additionally, to avoid ambiguity, - keys must not conflict with any other metadata keys for the project or be one of the reserved keys: - "cluster-location" "cluster-name" "cluster-uid" "configure-sh" "containerd-configure-sh" "enable-os-login" - "gci-update-strategy" "gci-ensure-gke-docker" "instance-template" "kube-env" "startup-script" "user-data" - "disable-address-manager" "windows-startup-script-ps1" "common-psm1" "k8s-node-setup-psm1" "install-ssh-psm1" - "user-profile-psm1" "serial-port-logging-enable" - - Values are free-form strings, and only have meaning as interpreted by the image running in the instance. - The only restriction placed on them is that each value's size must be less than or equal to 32 KB. - - The total size of all keys and values must be less than 512 KB. - - An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - labels: - type: object - description: | - The map of Kubernetes labels (key/value pairs) to be applied to each node. - These will added in addition to any default label(s) that Kubernetes may apply to the node. - In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- - it's best to assume the behavior is undefined and conflicts should be avoided. For more information, - including usage and the valid values, see: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - - An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - localSsdCount: - type: number - description: | - The number of local SSD disks to be attached to the node. - - The limit for this value is dependant upon the maximum number of disks available on a machine per zone. - See: https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits for more information. - tags: - type: array - uniqueItems: True - description: | - The list of instance tags applied to all nodes. 
Tags are used to identify valid sources or targets for - network firewalls and are specified by the client during cluster or node pool creation. - Each tag within the list must comply with RFC1035. - items: - type: string - preemptible: - type: boolean - description: | - Whether the nodes are created as preemptible VM instances. - See: https://cloud.google.com/compute/docs/instances/preemptible for more information about preemptible VM instances. - sandboxConfig: - type: object - additionalProperties: false - description: | - Sandbox configuration for this node. - required: - - sandboxType - properties: - sandboxType: - type: string - description: | - Type of the sandbox to use for the node (e.g. 'gvisor') - diskType: - type: string - description: | - Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') - - If unspecified, the default disk type is 'pd-standard' - enum: - - pd-standard - - pd-ssd - accelerators: - type: array - uniqueItems: True - description: | - A list of hardware accelerators to be attached to each node. - See https://cloud.google.com/compute/docs/gpus for more information about support for GPUs. - items: - type: object - additionalProperties: false - description: The Hardware Accelerator request object. - required: - - acceleratorCount - - acceleratorType - properties: - acceleratorCount: - type: string - description: | - The number of the accelerator cards exposed to an instance. - acceleratorType: - type: string - description: | - The accelerator type resource name. The list of supported - accelerator types can be found here - https://cloud.google.com/compute/docs/gpus/#Introduction - minCpuPlatform: - type: string - description: | - Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms, - such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". 
- enum: - - Intel Sandy Bridge - - Intel Ivy Bridge - - Intel Haswell - - Intel Broadwell - - Intel Skylake - workloadMetadataConfig: - type: object - additionalProperties: false - description: | - The workload metadata configuration for the node. - required: - - nodeMetadata - properties: - nodeMetadata: - type: string - description: | - Configuration that defines how to expose the node metadata to the workload running on the node. - enum: - - UNSPECIFIED - - SECURE - - EXPOSE - - GKE_METADATA_SERVER - shieldedInstanceConfig: - type: object - additionalProperties: false - description: | - Shielded Instance options. - properties: - enableSecureBoot: - type: boolean - description: | - Defines whether the instance has Secure Boot enabled. - - Secure Boot helps ensure that the system only runs authentic software by verifying the digital - signature of all boot components, and halting the boot process if signature verification fails. - enableIntegrityMonitoring: - type: boolean - description: | - Defines whether the instance has integrity monitoring enabled. - - Enables monitoring and attestation of the boot integrity of the instance. The attestation is - performed against the integrity policy baseline. This baseline is initially derived from the - implicitly trusted boot image when the instance is created. - taints: - type: array - uniqueItems: True - description: | - A list of Kubernetes taints to be applied to each node. - items: - type: object - additionalProperties: false - description: The taint object's key, value, and effect. - required: - - key - - value - - effect - properties: - key: - type: string - description: | - The taint object's key. - value: - type: string - description: | - The taint object's value. - effect: - type: string - enum: - - EFFECT_UNSPECIFIED - - NO_SCHEDULE - - PREFER_NO_SCHEDULE - - NO_EXECUTE - -properties: - project: - type: string - description: | - The project ID of the project containing the cluster. 
The - Google apps domain is prefixed if applicable. - clusterLocationType: - type: string - description: | - Location type for the cluster Zonal or Regional. DEPRECATED - enum: - - Regional - - Zonal - region: - type: string - description: | - The region the cluster belongs to. Should be set when clusterLocationType is set to Regional. DEPRECATED - location: - type: string - description: | - The location the cluster belongs to. - zone: - type: string - description: The zone the cluster belongs to. - cluster: - type: object - additionalProperties: false - description: The cluster configuration. - required: - - network - - subnetwork - allOf: - # We should either be using unified Stackdriver monitoring for _both_ - # logging and monitoring, or for _neither_. - - oneOf: - - properties: - loggingService: - enum: ["logging.googleapis.com/kubernetes"] - monitoringService: - enum: ["monitoring.googleapis.com/kubernetes"] - - allOf: - - not: - properties: - loggingService: - enum: ["logging.googleapis.com/kubernetes"] - - not: - properties: - monitoringService: - enum: ["monitoring.googleapis.com/kubernetes"] - - oneOf: - - allOf: - - required: - - nodePools - - not: - required: - - initialNodeCount - - required: - - nodeConfig - properties: - name: - type: string - description: The name of the cluster. - description: - type: string - description: An optional description of the cluster. - initialNodeCount: - description: | - Deprecated. Use nodePools.initialNodeCount instead. - $ref: '#/definitions/initialNodeCount' - nodeConfig: - description: | - Deprecated. Use nodePools.config instead. - $ref: '#/definitions/nodeConfig' - nodePools: - type: array - uniqueItems: True - minItems: 1 - description: | - The node pools associated with this cluster. This field should not be set if "nodeConfig" or - "initialNodeCount" are specified. - required: - - name - - config - items: - type: object - properties: - name: - type: string - description: | - The name of the node pool. 
- config: - $ref: '#/definitions/nodeConfig' - initialNodeCount: - $ref: '#/definitions/initialNodeCount' - locations: - $ref: '#/definitions/locations' - version: - type: string - description: | - The version of the Kubernetes of this node. - autoscaling: - type: object - additionalProperties: false - description: | - Autoscaler configuration for this NodePool. - Autoscaler is enabled only if a valid configuration is present. - properties: - enabled: - type: boolean - description: | - Is autoscaling enabled for this node pool. - minNodeCount: - type: integer - description: | - Minimum number of nodes in the NodePool. Must be >= 1 and <= maxNodeCount. - maxNodeCount: - type: integer - description: | - Maximum number of nodes in the NodePool. Must be >= minNodeCount. There has to enough quota to scale up the cluster. - autoprovisioned: - type: boolean - description: | - Can this node pool be deleted automatically. - management: - type: object - additionalProperties: false - description: | - NodeManagement configuration for this NodePool. - properties: - autoUpgrade: - type: boolean - description: | - Whether the nodes will be automatically upgraded. - autoRepair: - type: boolean - description: | - Whether the nodes will be automatically repaired. - maxPodsConstraint: - type: object - additionalProperties: false - description: | - The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool. - properties: - maxPodsPerNode: - type: integer - description: | - Constraint enforced on the max num of pods per node. - enableDefaultAuthOutput: - type: boolean - default: False - description: | - If clientKey/clientCertificate should be returned. - masterAuth: - type: object - additionalProperties: false - description: | - The authentication information for accessing the master endpoint. 
If unspecified, the - defaults are used: For clusters before v1.12, if masterAuth is unspecified, username will be set to "admin", - a random password will be generated, and a client certificate will be issued. - properties: - username: - type: string - description: | - The username to use for HTTP basic authentication to the master endpoint. For clusters v1.6.0 and later, - basic authentication can be disabled by leaving username unspecified (or setting it to the empty string). - password: - type: string - description: | - The password to use for HTTP basic authentication to the master endpoint. Because the master endpoint - is open to the Internet, you should create a strong password. If a password is provided for cluster - creation, username must be non-empty. - minLength: 16 - clientCertificateConfig: - type: object - additionalProperties: false - description: | - The configuration for client certificates on the cluster. - require: - - issueClientCertificate - properties: - issueClientCertificate: - type: boolean - description: | - Issue a client certificate. - initialClusterVersion: - type: string - description: | - The initial Kubernetes version for this cluster. Valid versions are those found in validMasterVersions - returned by getServerConfig. The version can be upgraded over time; such upgrades are reflected - in currentMasterVersion and currentNodeVersion. - - Users may specify either explicit versions offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "","-": picks the default Kubernetes version - loggingService: - type: string - default: logging.googleapis.com - description: | - The logging service the cluster should use to write logs. 
Currently available options: - - logging.googleapis.com/kubernetes - unified Kubernetes logging/monitoring service. - logging.googleapis.com - legacy Google Cloud Logging service. - none - no logs will be exported from the cluster. - if left as an empty string, logging.googleapis.com will be used. - enum: - - none - - logging.googleapis.com - - logging.googleapis.com/kubernetes - monitoringService: - type: string - default: monitoring.googleapis.com - description: | - The monitoring service the cluster should use to write metrics. Currently available options: - - monitoring.googleapis.com/kubernetes - unified Kubernetes logging/monitoring service. - monitoring.googleapis.com - legacy Google Cloud Monitoring service. - none - no metrics will be exported from the cluster. - if left as an empty string, monitoring.googleapis.com will be used. - enum: - - none - - monitoring.googleapis.com - - monitoring.googleapis.com/kubernetes - network: - type: string - default: default - description: | - The name of the Google Compute Engine network to which the cluster is connected. If left unspecified, - the default network will be used. On output this shows the network ID instead of the name. - subnetwork: - type: string - description: | - The name of the Google Compute Engine subnetwork to which the cluster is connected. - On output this shows the subnetwork ID instead of the name. - clusterIpv4Cidr: - type: string - description: | - The IP address range of the container pods in the cluster, - in the CIDR notation (e.g. 10.96.0.0/14). Leave blank to have one - automatically chosen or specify a /14 block in 10.0.0.0/8. - locations: - $ref: '#/definitions/locations' - enableKubernetesAlpha: - type: boolean - description: | - Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1beta1) - and features that may not be production ready in the kubernetes version of the master and nodes. 
- The cluster has no SLA for uptime and master/node upgrades are disabled. - Alpha enabled clusters are automatically deleted thirty days after creation. - resourceLabels: - type: object - description: | - The resource labels for the cluster to use to annotate any related Google Compute Engine resources. - - An object containing a list of "key": value pairs. - Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - labelFingerprint: - type: string - description: The fingerprint of the set of labels for the cluster. - legacyAbac: - type: object - additionalProperties: false - description: The configuration for the legacy ABAC authorization mode. - required: - - enabled - properties: - enabled: - type: boolean - default: False - description: | - Whether the ABAC authorizer is enabled for this cluster. When enabled, identities in the system, - including service accounts, nodes, and controllers, will have statically granted permissions - beyond those provided by the RBAC configuration or IAM. - networkPolicy: - type: object - additionalProperties: false - description: | - The configuration options for the NetworkPolicy feature - https://kubernetes.io/docs/concepts/services-networking/networkpolicies/ - properties: - provider: - type: string - description: The selected network policy provider. - enum: - - PROVIDER_UNSPECIFIED - - CALICO - enabled: - type: boolean - description: | - Defines whether the network policy is enabled on the cluster. - ipAllocationPolicy: - type: object - additionalProperties: false - description: The configuration for the cluster IP allocation. - properties: - useIpAliases: - type: boolean - description: | - Defines whether alias IPs are used for pod IPs in the cluster. - createSubnetwork: - type: boolean - description: | - Defines whether a new subnetwork is created automatically for the - cluster. This field is only applicable is useIpAliases is True. 
- subnetworkName: - type: string - description: | - A custom subnetwork name to be used if createSubnetwork is True. - If this field is empty, a name is automatically generated for - the new subnetwork. - clusterSecondaryRangeName: - type: string - description: | - The name of the secondary range to be used for the cluster CIDR - block. The secondary range is used for pod IP addresses. - This must be an existing secondary range associated with the - cluster subnetwork.This is only applicable if both - useIpAliases and createSubnetwork are False. - servicesSecondaryRangeName: - type: string - description: | - The name of the secondary range to be used as for the service - CIDR block. The secondary range is used for service ClusterIPs. - This must be an existing secondary range associated with - the cluster subnetwork. This is only applicable if both - useIpAliases and createSubnetwork are False. - clusterIpv4CidrBlock: - type: string - description: | - The IP address range for the cluster pod IPs. If this field is set, - then cluster.cluster_ipv4_cidr must be left blank. - This field is only applicable when useIpAliases is True. - Set to blank to have a range chosen with the default size. - Set to /netmask (e.g., /14) to have a range chosen with a specific - netmask. Set to a CIDR notation (e.g., 10.96.0.0/14) from the - RFC-1918 private networks - (e.g., 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific - range to use. - nodeIpv4CidrBlock: - type: string - description: | - The IP address range of the instance IPs in this cluster. - This is applicable only if createSubnetwork is True. - Set to blank to have a range chosen with the default size. - Set to /netmask (e.g., /14) to have a range chosen with a specific - netmask. Set to a CIDR notation (e.g., 10.96.0.0/14) from the - RFC-1918 private networks - (e.g., 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific - range to use. 
- servicesIpv4CidrBlock: - type: string - description: | - The IP address range of the services IPs in the cluster. - This field is only applicable when useIpAliases is True. - Set to blank to have a range chosen with the default size. - Set to /netmask (e.g., /14) to have a range chosen with a specific - netmask. Set to a CIDR notation (e.g., 10.96.0.0/14) from the - RFC-1918 private networks - (e.g., 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific - range to use. - allowRouteOverlap: - type: boolean - description: | - If True, allows allocation of cluster CIDR ranges that overlap with - certain kinds of network routes (with CIDR ranges that are larger - than the cluster CIDR range). By default, we do not allow cluster - CIDR ranges to intersect with any user-declared routes. - tpuIpv4CidrBlock: - type: string - description: | - The IP address range of the Cloud TPUs in this cluster. - If unspecified, a range will be automatically chosen with the default size. - - This field is only applicable when useIpAliases is true. - - If unspecified, the range will use the default size. - - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. - - Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks - (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. - masterAuthorizedNetworksConfig: - type: object - additionalProperties: false - description: | - The configuration for the master authorized networks feature. - required: - - enabled - - cidrBlocks - properties: - enabled: - type: boolean - description: | - Defines whether the master authorized networks feature is enabled. - cidrBlocks: - type: array - uniqueItems: True - description: | - A list of cidrBlocks in the CIDR notation. - items: - type: object - additionalProperties: false - description: The CIDR block object. 
- required: - - cidrBlock - properties: - displayName: - type: string - description: An optional display name for the CIDR block. - cidrBlock: - type: string - description: The cidrBlock in the CIDR notation. - releaseChannel: - type: object - additionalProperties: false - description: | - ReleaseChannel indicates which release channel a cluster is subscribed to. Release channels are arranged in order of risk and frequency of updates. - - When a cluster is subscribed to a release channel, Google maintains both the master version and the node version. - Node auto-upgrade defaults to true and cannot be disabled. Updates to version related fields (e.g. currentMasterVersion) return an error. - properties: - channel: - type: string - default: REGULAR - description: | - Use RAPID, REGULAR or STABLE. Defaults to REGULAR - enum: - - REGULAR - - RAPID - - STABLE - addonsConfig: - type: object - additionalProperties: false - description: | - Configurations for the various addons available to run in the cluster. - properties: - httpLoadBalancing: - type: object - additionalProperties: false - description: | - Configuration for the HTTP (L7) load balancing controller addon, which makes it easy to set up - HTTP load balancers for services in a cluster. - required: - - disabled - properties: - disabled: - type: boolean - description: | - Whether the HTTP Load Balancing controller is enabled in the cluster. - When enabled, it runs a small pod in the cluster that manages the load balancers. - horizontalPodAutoscaling: - type: object - additionalProperties: false - description: | - Configuration for the horizontal pod autoscaling feature, which increases or decreases the number - of replica pods a replication controller has based on the resource usage of the existing pods. - required: - - disabled - properties: - disabled: - type: boolean - description: | - Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. 
When enabled, it ensures - that a Heapster pod is running in the cluster, which is also used by the Cloud Monitoring service. - kubernetesDashboard: - type: object - additionalProperties: false - description: | - Configuration for the Kubernetes Dashboard. This addon is deprecated, and will be disabled in 1.15. - It is recommended to use the Cloud Console to manage and monitor your Kubernetes clusters, workloads - and applications. - For more information, see: https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards - required: - - disabled - properties: - disabled: - type: boolean - description: | - Whether the Kubernetes Dashboard is enabled for this cluster. - networkPolicyConfig: - type: object - additionalProperties: false - description: | - The configuration for the NetworkPolicy add-on. This only tracks - whether the add-on is enabled on the Master. It does not track - whether network policy is enabled for the nodes. - required: - - disabled - properties: - disabled: - type: boolean - description: | - Whether NetworkPolicy is enabled for this cluster. - istioConfig: - type: object - additionalProperties: false - description: | - Configuration for Istio, an open platform to connect, manage, and secure microservices. - required: - - disabled - properties: - disabled: - type: boolean - description: | - Whether Istio is enabled for this cluster. - auth: - type: string - description: | - The specified Istio auth mode, either none, or mutual TLS. - enum: - - AUTH_NONE - - AUTH_MUTUAL_TLS - cloudRunConfig: - type: object - additionalProperties: false - description: | - Configuration for the Cloud Run addon. The IstioConfig addon must be enabled in order to enable - Cloud Run addon. This option can only be enabled at cluster creation time. - required: - - disabled - properties: - disabled: - type: boolean - description: | - Whether Cloud Run addon is enabled for this cluster. 
- autoscaling: - type: object - additionalProperties: false - description: | - Cluster-level autoscaling configuration. - properties: - enableNodeAutoprovisioning: - type: boolean - description: | - Enables automatic node pool creation and deletion. - resourceLimits: - type: array - uniqueItems: True - description: | - Contains global constraints regarding minimum and maximum amount of resources in the cluster. - items: - type: object - additionalProperties: false - properties: - resourceType: - type: string - description: | - Resource name "cpu", "memory" or gpu-specific string. - minimum: - type: integer - description: | - Minimum amount of the resource in the cluster. - maximum: - type: integer - description: | - Maximum amount of the resource in the cluster. - autoscalingProfile: - type: string - description: | - Defines autoscaling behaviour. - PROFILE_UNSPECIFIED: No change to autoscaling configuration. - OPTIMIZE_UTILIZATION: Prioritize optimizing utilization of resources. - BALANCED: Use default (balanced) autoscaling configuration. - enum: - - PROFILE_UNSPECIFIED - - OPTIMIZE_UTILIZATION - - BALANCED - autoprovisioningNodePoolDefaults: - type: object - additionalProperties: false - description: | - AutoprovisioningNodePoolDefaults contains defaults for a node pool created by NAP. - properties: - oauthScopes: - type: array - uniqueItems: True - description: | - Scopes that are used by NAP when creating node pools. - If oauthScopes are specified, serviceAccount should be empty. - items: - type: string - serviceAccount: - type: string - description: | - The Google Cloud Platform Service Account to be used by the node VMs. - If serviceAccount is specified, scopes should be empty. - autoprovisioningLocations: - type: array - uniqueItems: True - description: | - The list of Google Compute Engine zones in which the NodePool's nodes can be created by NAP. 
- items: - type: string - binaryAuthorization: - type: object - additionalProperties: false - description: | - Configuration for Binary Authorization. - properties: - enabled: - type: boolean - description: | - Enable Binary Authorization for this cluster. - If enabled, all container images will be validated by Google Binauthz. - maintenancePolicy: - type: object - additionalProperties: false - description: | - The configuration of the maintenance policy for the cluster. - properties: - window: - type: object - additionalProperties: false - description: | - The time window within which maintenance may be performed. - properties: - dailyMaintenanceWindow: - type: object - additionalProperties: false - description: The daily maintenance operation window. - properties: - startTime: - type: string - description: | - Time within the maintenance window to start the - maintenance operations. It must be in the HH:MM - format, where HH 00-23 and MM 00-59 GMT. - defaultMaxPodsConstraint: - type: object - additionalProperties: false - description: | - The default constraint on the maximum number of pods that can be run simultaneously on a node in the node pool of this cluster. Only honored if cluster created with IP Alias support. - required: - - maxPodsPerNode - properties: - maxPodsPerNode: - type: integer - description: | - Constraint enforced on the max num of pods per node. - resourceUsageExportConfig: - type: object - additionalProperties: false - description: | - Configuration for exporting resource usages. Resource usage export is disabled when this config unspecified. - properties: - bigqueryDestination: - type: object - additionalProperties: false - description: | - Configuration to use BigQuery as usage export destination. - required: - - datasetId - properties: - datasetId: - type: string - description: | - The ID of a BigQuery Dataset. - enableNetworkEgressMetering: - type: boolean - description: | - Whether to enable network egress metering for this cluster. 
- If enabled, a daemonset will be created in the cluster to meter network egress traffic. - consumptionMeteringConfig: - type: object - additionalProperties: false - description: | - Configuration to enable resource consumption metering. - required: - - enabled - properties: - enabled: - type: boolean - description: | - Whether to enable consumption metering for this cluster. - If enabled, a second BigQuery table will be created to hold resource consumption records. - podSecurityPolicyConfig: - type: object - additionalProperties: false - description: | - The configuration for the PodSecurityPolicy feature. - required: - - enabled - properties: - enabled: - type: boolean - description: | - Enable the PodSecurityPolicy controller for this cluster. - If enabled, pods must be valid under a PodSecurityPolicy to be created. - authenticatorGroupsConfig: - type: object - additionalProperties: false - description: | - Configuration controlling RBAC group membership information. - required: - - enabled - properties: - enabled: - type: boolean - description: | - Whether this cluster should return group membership lookups during authentication - using a group of security groups. - securityGroup: - type: string - description: | - The name of the security group-of-groups to be used. Only relevant if enabled = true. - privateClusterConfig: - type: object - additionalProperties: false - description: | - Configuration for private cluster. - properties: - enablePrivateNodes: - type: boolean - description: | - Whether nodes have internal IP addresses only. If enabled, all nodes are given only RFC 1918 - private addresses and communicate with the master via private networking. - enablePrivateEndpoint: - type: boolean - description: | - Whether the master's internal IP address is used as the cluster endpoint. - enablePeeringRouteSharing: - type: boolean - description: | - Whether to enable route sharing over the network peering. 
- masterIpv4CidrBlock: - type: string - description: | - The IP range in CIDR notation to use for the hosted master network. This range will be used - for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. - This range must not overlap with any other ranges in use within the cluster's network. - verticalPodAutoscaling: - type: object - additionalProperties: false - description: | - Cluster-level Vertical Pod Autoscaling configuration. - required: - - enabled - properties: - enabled: - type: boolean - description: | - Enables vertical pod autoscaling. - tierSettings: - type: object - additionalProperties: false - description: | - Cluster tier settings. - required: - - tier - properties: - tier: - type: string - description: | - Cluster tier. - enum: - - UNSPECIFIED - - STANDARD - - ADVANCED - workloadIdentityConfig: - type: object - additionalProperties: false - description: | - Configuration for the use of Kubernetes Service Accounts in GCP IAM policies. - properties: - workloadPool: - type: string - description: | - The workload pool to attach all Kubernetes service accounts to. - databaseEncryption: - type: object - additionalProperties: false - description: | - Configuration of etcd encryption. - properties: - state: - type: string - description: | - Denotes the state of etcd encryption. - enum: - - UNKNOWN - - ENCRYPTED - - DECRYPTED - keyName: - type: string - description: | - Name of CloudKMS key to use for the encryption of secrets in etcd. - Ex. projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key - enableTpu: - type: boolean - description: | - Enable the ability to use Cloud TPUs in this cluster. - privateCluster: - type: boolean - description: | - If this is a private cluster setup. Private clusters are clusters that, by default have no - external IP addresses on the nodes and where nodes and the master communicate over private IP addresses. 
- This field is deprecated, use privateClusterConfig.enable_private_nodes instead. - masterIpv4CidrBlock: - type: string - description: | - The IP prefix in CIDR notation to use for the hosted master network. - This prefix will be used for assigning private IP addresses to the master or set of masters, as well as - the ILB VIP. This field is deprecated, use privateClusterConfig.master_ipv4_cidr_block instead. -outputs: - selfLink: - type: string - description: The server-defined resource URL. - endpoint: - type: string - description: The IP address of the cluster's Kubernetes Master. - currentMasterVersion: - type: string - description: The current version of the master in the cluster. - currentNodeVersion: - type: string - description: | - The current version of the node software components. In case of - multiple versions (e.g., when the components are in the process - of being upgraded), this parameter reflects the minimum version - among all nodes. - nodeIpv4CidrSize: - type: number - description: | - The size of the address space on each node for hosting containers. - This is provisioned from within the container_ipv4_cidr range. - servicesIpv4Cidr: - type: string - description: | - The IP address range of the Kubernetes services in the cluster, - in the CIDR notation (e.g., 1.2.3.4/29). Service addresses are - typically put in the last /16 of the container CIDR. - instanceGroupUrls: - type: array - items: - type: string - description: | - A list of instance group URLs that have been assigned to the - cluster. - clientCertificate: - type: string - description: | - The Base64-encoded public certificate the clients use to authenticate - to the cluster endpoint. - clientKey: - type: string - description: | - The Base64-encoded private key the clients use to authenticate - to the cluster endpoint. - clusterCaCertificate: - type: string - description: | - The Base64-encoded public certificate that is the root of trust for - the cluster. 
- maintenanceWindowDuration: - type: string - description: | - Duration of the maintenance time window; automatically chosen to be - the smallest possible in the given scenario. The duration is in the - RFC3339 format PTnHnMnS e.g., "PT4H0M0S". - -documentation: - - templates/gke/README.md - -examples: - - templates/gke/examples/gke.yaml diff --git a/dm/templates/gke/tests/integration/gke.bats b/dm/templates/gke/tests/integration/gke.bats deleted file mode 100644 index 70a19cb9b9a..00000000000 --- a/dm/templates/gke/tests/integration/gke.bats +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables - export CLUSTER_NAME="testcluster-${RAND}" - export REGION="us-east1" - export NETWORK_NAME="test-k8nw-${RAND}" - export SUBNET_NAME="test-k8subnet-${RAND}" - export MACHINE_TYPE="n1-standard-1" - export NODE_COUNT="1" - export LOCALSSD_COUNT="1" - export CLUSTER_VERSION="latest" - export LOGGING_SERVICE="logging.googleapis.com/kubernetes" - export MONITORING_SERVICE="monitoring.googleapis.com/kubernetes" - export MASTERIPV4_CIDRBLOCK="172.16.0.0/28" - export CLUSTERIPV4_CIDR="10.0.0.0/11" - export SERVICESIPV4_CIDRBLOCK="10.96.0.0/18" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - # Create a VPC network and a subnet for deploying the cluster. - gcloud compute networks create "${NETWORK_NAME}" \ - --subnet-mode custom - - gcloud compute networks subnets create "${SUBNET_NAME}" \ - --region ${REGION} --network "${NETWORK_NAME}" \ - --range 10.200.0.0/24 - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - # Delete the VPC subnets and network after the tests are completed. - gcloud compute networks subnets delete "${SUBNET_NAME}" \ - --region ${REGION} -q - - gcloud compute networks delete "${NETWORK_NAME}" -q - fi - - # Per-test teardown steps. 
-} - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verify if cluster: ${CLUSTER_NAME} was created " { - run gcloud container clusters describe "${CLUSTER_NAME}" \ - --region ${REGION} --format="value(name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CLUSTER_NAME}" ]] -} - -@test "Cluster ${CLUSTER_NAME} is deployed to network ${NETWORK_NAME}" { - run gcloud container clusters describe "${CLUSTER_NAME}" \ - --region ${REGION} --format="value(network)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${NETWORK_NAME}" ]] -} - -@test "Network ${NETWORK_NAME} has subnet ${SUBNET_NAME}" { - run gcloud container clusters describe "${CLUSTER_NAME}" \ - --region ${REGION} \ - --format="value(networkConfig.subnetwork.scope(subnetworks))" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${SUBNET_NAME}" ]] -} - -@test "NodeCount on ${CLUSTER_NAME} is ${NODE_COUNT}" { - run gcloud container clusters describe "${CLUSTER_NAME}" \ - --region ${REGION} --format="value(currentNodeCount)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "2" ]] -} - -@test "Cluster ${CLUSTER_NAME} machineType is ${MACHINE_TYPE}" { - run gcloud container clusters describe "${CLUSTER_NAME}" \ - --region ${REGION} --format="value(nodeConfig.machineType)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${MACHINE_TYPE}" ]] -} - -@test "Logging service on ${CLUSTER_NAME} is ${LOGGING_SERVICE}" { - run gcloud container clusters describe "${CLUSTER_NAME}" \ - --region ${REGION} --format="value(loggingService)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${LOGGING_SERVICE}" ]] -} - -@test "Monitoring service on ${CLUSTER_NAME} is ${MONITORING_SERVICE}" { - run gcloud container clusters describe "${CLUSTER_NAME}" \ - --region ${REGION} --format="value(monitoringService)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ 
"${MONITORING_SERVICE}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] - - run gcloud container clusters describe "${CLUSTER_NAME}" \ - --region ${REGION} --format="value(name)" - [[ "$status" -ne 0 ]] -} diff --git a/dm/templates/gke/tests/integration/gke.yaml b/dm/templates/gke/tests/integration/gke.yaml deleted file mode 100644 index 8f700ad3b42..00000000000 --- a/dm/templates/gke/tests/integration/gke.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Test of the GKE (Google Kubernetes Engine) template. - -imports: - - path: templates/gke/gke.py - name: gke.py - -resources: - - name: ${CLUSTER_NAME} - type: gke.py - properties: - clusterLocationType: Regional - region: ${REGION} - cluster: - name: ${CLUSTER_NAME} - description: test gke cluster - network: ${NETWORK_NAME} - subnetwork: ${SUBNET_NAME} - initialClusterVersion: ${CLUSTER_VERSION} - nodePools: - - name: default - initialNodeCount: ${NODE_COUNT} - config: - machineType: ${MACHINE_TYPE} - oauthScopes: - - https://www.googleapis.com/auth/compute - - https://www.googleapis.com/auth/devstorage.read_only - - https://www.googleapis.com/auth/logging.write - - https://www.googleapis.com/auth/monitoring - localSsdCount: ${LOCALSSD_COUNT} - locations: - - us-east1-b - - us-east1-d - loggingService: ${LOGGING_SERVICE} - monitoringService: ${MONITORING_SERVICE} - masterIpv4CidrBlock: ${MASTERIPV4_CIDRBLOCK} - clusterIpv4Cidr: ${CLUSTERIPV4_CIDR} - ipAllocationPolicy: - useIpAliases: True - servicesIpv4CidrBlock: ${SERVICESIPV4_CIDRBLOCK} diff --git a/dm/templates/haproxy/README.md b/dm/templates/haproxy/README.md deleted file mode 100644 index 353a58217ed..00000000000 --- a/dm/templates/haproxy/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# HAProxy - -This template: -- Creates a Compute Instance with an [HAProxy](http://www.haproxy.org/) installed -- Configures HAProxy to 
load-balance traffic between one or more of the provided -[instance groups](https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroups) - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant [compute.viewer](https://cloud.google.com/compute/docs/access/iam) role to -Compute Engine [default service account](https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_default_service_account). -Alternatively, create a new service account with the above role, and add it to -the template's `resources.properties.serviceAccountEmail` property. -- Create one or more [instanceGroups](https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroups) -to be load-balanced, and add them to `resources.properties.instances.groups` collection. - -## Deployment - -### Resources - -- [compute.v1.instance](https://cloud.google.com/compute/docs/reference/rest/v1/instances) - -### Properties - -See the `properties` section in the schema file(s): -- [HAProxy](haproxy.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/haproxy.yaml](examples/haproxy.yaml): - -```shell - cp templates/haproxy/examples/haproxy.yaml my_haproxy.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_haproxy.yaml # <== change values to match your GCP setup -``` - -5. 
Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_haproxy.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [HAProxy](examples/haproxy.yaml) diff --git a/dm/templates/haproxy/examples/haproxy.yaml b/dm/templates/haproxy/examples/haproxy.yaml deleted file mode 100644 index 5262e3aa229..00000000000 --- a/dm/templates/haproxy/examples/haproxy.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Example of the HAProxy template usage. -# -# In this example, an HAProxy load balancing instance is created to route traffic -# between two existing managed instance groups: -# * zones/us-central1-a/instanceGroups/instance-group-1 -# * zones/us-east1-b/instanceGroups/instance-group-3 - -imports: - - path: templates/haproxy/haproxy.py - name: haproxy.py - -resources: - - name: ilb-haproxy - type: haproxy.py - properties: - zone: us-central1-a - machineType: f1-micro - network: default - loadBalancer: - algorithm: roundrobin - port: 8080 - mode: tcp - instances: - port: 80 - groups: - - instance-group-1 - - zones/us-east1-b/instanceGroups/instance-group-3 diff --git a/dm/templates/haproxy/haproxy.py b/dm/templates/haproxy/haproxy.py deleted file mode 100644 index 5c3cf9c2193..00000000000 --- a/dm/templates/haproxy/haproxy.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Compute Instance with an HAProxy installed - and configured to load-balance traffic between instance groups. -""" - -DISK_IMAGE = 'projects/debian-cloud/global/images/family/debian-9' -SETUP_HAPROXY_SH = """#!/bin/bash - -set -euf -o pipefail - -apt-get update && apt-get install -y haproxy - -METADATA_SERVER="http://metadata.google.internal/computeMetadata/v1/instance/attributes" -function get_metadata() { - curl -s "$METADATA_SERVER/$1" -H "Metadata-Flavor: Google" -} - -# Set up an HAProxy update script. -CONFIG_UPDATER="/sbin/haproxy-conf-updater" -get_metadata "haproxy-conf-updater" > $CONFIG_UPDATER -REFRESH_RATE=`get_metadata "refresh-rate"` -chmod +x $CONFIG_UPDATER - -# Set up an HAProxy config. -$CONFIG_UPDATER - -# Keep the HAProxy config up to date. -CRONFILE=$(mktemp) -crontab -l > "${CRONFILE}" || true -echo "${REFRESH_RATE} * * * * ${CONFIG_UPDATER}" >> "${CRONFILE}" -crontab "${CRONFILE}" -service cron start -""" - -HAPROXY_CONF_UPDATER_SH = """#!/bin/bash - -set -euf -o pipefail - -METADATA_SERVER="http://metadata.google.internal/computeMetadata/v1/instance/attributes" - -function get_metadata() { - curl -s "$METADATA_SERVER/$1" -H "Metadata-Flavor: Google" -} - -CONFIG_FILE=/etc/haproxy/haproxy.cfg -BASE_CONFIG_FILE=${CONFIG_FILE}.bak - -if [ ! -f $BASE_CONFIG_FILE ]; then - cp $CONFIG_FILE $BASE_CONFIG_FILE -fi - -TEMP_CONFIG_FILE=`mktemp` -LB_ALGORITHM=`get_metadata lb-algorithm` -IG_PORT=`get_metadata ig-port` -LB_PORT=`get_metadata lb-port` -LB_MODE=`get_metadata lb-mode` - -# Build a server list. 
-SERVERS= -GCLOUD=`which gcloud` -for g in $(get_metadata groups); do - if [[ "${g}" =~ zones/([^/]+)/instanceGroups/(.*)$ ]]; then - GROUP="${BASH_REMATCH[2]}" - ZONE="${BASH_REMATCH[1]}" - SERVERS=${SERVERS}$'\\n'$($GCLOUD compute instance-groups list-instances \\ - $GROUP --zone $ZONE | grep -v NAME | \ - sed "s/^\\([^ ]*\\) .*\\$/ server \\1 \\1:$IG_PORT check/") - else - echo "Invalid group: ${g}" - fi -done - -# Set up the config file. -cp ${BASE_CONFIG_FILE} ${TEMP_CONFIG_FILE} - -echo " -# Internal load balancing config - -frontend tcp-in - bind *:$LB_PORT - mode $LB_MODE - option ${LB_MODE}log - default_backend instances - -backend instances - mode $LB_MODE - balance $LB_ALGORITHM -${SERVERS}" >> ${TEMP_CONFIG_FILE} - -# Update the config and restart if the config has changed. -ret=0 -diff ${TEMP_CONFIG_FILE} ${CONFIG_FILE} || ret=$? -if [ ${ret} -ne 0 ]; then - mv ${TEMP_CONFIG_FILE} ${CONFIG_FILE} - service haproxy restart -fi -""" - -def append_metadata_entry(metadata, new_key, new_value): - """ Appends a new key-value pair to the existing metadata. """ - - metadata['items'].append({ - 'key': new_key, - 'value': new_value - }) - -def configure_haproxy_frontend(properties, metadata): - """ Sets up user-facing HAProxy parameters. """ - - lb_properties = properties['loadBalancer'] - lb_algorithm = lb_properties['algorithm'] - lb_mode = lb_properties['mode'] - lb_port = lb_properties['port'] - - append_metadata_entry(metadata, 'lb-algorithm', lb_algorithm) - append_metadata_entry(metadata, 'lb-port', lb_port) - append_metadata_entry(metadata, 'lb-mode', lb_mode) - -def configure_haproxy_backend(home_zone, properties, metadata): - """ Sets up instance-facing HAProxy parameters. 
""" - - instances_properties = properties['instances'] - append_metadata_entry(metadata, 'ig-port', instances_properties['port']) - groups = ' '.join(['zones/{}/instanceGroups/{}'.format(home_zone, group) - if 'zones/' not in group - else group - for group - in instances_properties['groups']]) - - append_metadata_entry(metadata, 'groups', groups) - cron_refresh_rate = instances_properties['refreshIntervalMin'] - cron_minutes_value = '*/' + str(cron_refresh_rate) - append_metadata_entry(metadata, 'refresh-rate', cron_minutes_value) - -def configure_haproxy_setup(metadata): - """ Adds metadata that installs and configures the HAProxy. """ - - append_metadata_entry(metadata, 'startup-script', SETUP_HAPROXY_SH) - append_metadata_entry(metadata, 'haproxy-conf-updater', - HAPROXY_CONF_UPDATER_SH) - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - project_id = properties.get('project', context.env['project']) - zone = properties['zone'] - metadata = properties.get('metadata', {'items':[]}) - - configure_haproxy_frontend(properties, metadata) - configure_haproxy_backend(zone, properties, metadata) - configure_haproxy_setup(metadata) - - service_account = properties['serviceAccountEmail'] - - load_balancer = { - 'name': context.env['name'], - 'type': 'instance.py', - 'properties': - { - 'name': properties.get('name', context.env['name']), - 'project': project_id, - 'machineType': properties['machineType'], - 'diskImage': DISK_IMAGE, - 'zone': zone, - 'network': properties['network'], - 'metadata': metadata, - 'serviceAccounts': [ - { - 'email': service_account, - 'scopes': [ - 'https://www.googleapis.com/auth/compute.readonly' - ] - } - ] - } - } - - return { - 'resources': [load_balancer], - 'outputs': [ - { - 'name': 'internalIp', - 'value': '$(ref.{}.internalIp)'.format(context.env['name']) - }, - { - 'name': 'externalIp', - 'value': '$(ref.{}.externalIp)'.format(context.env['name']) - }, - { - 
'name': 'name', - 'value': '$(ref.{}.name)'.format(context.env['name']) - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - }, - { - 'name': 'port', - 'value': properties['loadBalancer']['port'] - } - ] - } diff --git a/dm/templates/haproxy/haproxy.py.schema b/dm/templates/haproxy/haproxy.py.schema deleted file mode 100644 index 3da95b4e864..00000000000 --- a/dm/templates/haproxy/haproxy.py.schema +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: HAProxy load balancer template - author: Sourced Group Inc. - version: 1.0.0 - description: | - Deploys a Compute instance, with an HAProxy installed and configured to - load-balance traffic between instance groups. - -imports: - - path: ../instance/instance.py - name: instance.py - -additionalProperties: false - -required: - - zone - - network - - loadBalancer - - instances - -properties: - name: - type: string - description: A name of a load balancer instance. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the service. - network: - type: string - description: | - Name of the network to which a load balancer will be connected; e.g., - 'my-custom-network' or 'default'. - zone: - type: string - description: Availability zone. E.g. 
'us-central1-a' - machineType: - type: string - default: n1-standard-1 - description: | - The Compute instance type. E.g., 'n1-standard-1'. For details, see: - https://cloud.google.com/compute/docs/machine-types. - serviceAccountEmail: - type: string - default: default - description: | - A service account email with permissions sufficient to list zones and - instanceGroups instances. - loadBalancer: - type: object - additionalProperties: false - description: Front-end settings for the HAProxy load balancer. - required: - - port - properties: - algorithm: - type: string - default: roundrobin - description: The load balancing algorithm. - enum: - - roundrobin - - leastconn - - source - port: - type: integer - description: | - Number of the port at which the load balancer will receive requests. - mode: - type: string - default: http - description: The type of traffic the HAProxy will be load-balancing. - enum: - - http - - tcp - instances: - type: object - additionalProperties: false - description: Back-end settings of the HAProxy load balancer. - required: - - port - - groups - properties: - port: - type: integer - description: | - Number of the port at which instances will accept requests. - groups: - type: array - uniqItems: true - description: A list of instanceGroups that will be load-balanced. - items: - type: string - description: | - The instance group name. If located in the same availability zone - as a load balancer, a group name is sufficient. If located elsewhere, - the name should have the following format: 'zones/[ZONE]/instanceGroups/[GROUP]'. - refreshIntervalMin: - type: integer - minimum: 1 - maximum: 30 - default: 10 - description: | - Specifies how often the list of instances should be refreshed. - -outputs: - externalIp: - type: string - description: Reference to an external IP address of the load balancer. - internalIp: - type: string - description: Reference to an internal IP address of the load balancer. 
- name: - type: string - description: The load balancer name. - selfLink: - type: string - description: Link to the load balancer instance. - port: - type: integer - description: | - Number of the port where the HAProxy load balancer will accept requests. - -documentation: - - templates/haproxy/README.md - -examples: - - templates/haproxy/examples/haproxy.yaml - diff --git a/dm/templates/haproxy/tests/integration/haproxy.bats b/dm/templates/haproxy/tests/integration/haproxy.bats deleted file mode 100755 index 5eeb5f15ef4..00000000000 --- a/dm/templates/haproxy/tests/integration/haproxy.bats +++ /dev/null @@ -1,161 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/haproxy/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - # Set up instance groups to be load-balanced via HAProxy. 
- gcloud compute instance-templates create template-${RAND}-1 \ - --no-service-account --no-scopes --machine-type=f1-micro \ - --image-project=debian-cloud --image-family=debian-9 \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - gcloud compute instance-groups managed create group-${RAND}-1 \ - --zone us-central1-a --template template-${RAND}-1 --size 1 \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - gcloud compute instance-groups managed create group-${RAND}-2 \ - --zone us-central1-c --template template-${RAND}-1 --size 1 \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud compute instance-groups managed delete group-${RAND}-1 \ - --zone us-central1-a --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - gcloud compute instance-groups managed delete group-${RAND}-2 \ - --zone us-central1-c --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - gcloud compute instance-templates delete template-${RAND}-1 \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "Status: $status" - echo "Output: $output" - [[ "$status" -eq 0 ]] -} - -@test "Verifying that the HAProxy instance was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "Status: $status" - echo "Output: $output" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "ilb-proxy-${RAND}" ]] - - # Enabling OS login for the next tests - run gcloud compute instances add-metadata "ilb-proxy-${RAND}" \ - --metadata enable-oslogin=TRUE \ - --zone us-central1-a \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Pre-run Status: $status" - echo "Pre-run Output: $output" - - [[ "$status" -eq 0 ]] - - run gcloud compute ssh "ilb-proxy-${RAND}" --zone us-central1-a --tunnel-through-iap \ - --command "echo 'OK' " \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "SSH Status: $status" - echo "SSH Output: $output" - - echo "sleeping 30" - sleep 30 - - [[ "$status" -eq 0 ]] -} - -@test "Verifying that haproxy.cfg was populated with instances and had all properties set" { - - # Wait for the HAProxy instance to be configured. 
- until gcloud compute instances get-serial-port-output "ilb-proxy-${RAND}" \ - --zone us-central1-a \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" | grep /etc/haproxy/haproxy.cfg; do - echo "sleeping 10" - - sleep 10; - done - - # Verify VM serial output - run gcloud compute ssh "ilb-proxy-${RAND}" --zone us-central1-a --tunnel-through-iap \ - --command "sudo tail -n 15 /etc/haproxy/haproxy.cfg" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "SSH Status: $status" - echo "SSH Output: $output" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "group-${RAND}-1" ]] # has instances from group 1 - [[ "$output" =~ "group-${RAND}-2" ]] # has instances from group 2 - [[ "$output" =~ "mode tcp" ]] # the mode was set - [[ "$output" =~ "balance leastconn" ]] # load labalcing algorithm is set - [[ "$output" =~ ":9999" ]] # Load balancer's port - [[ "$output" =~ ":8888" ]] # Instance group's port -} - -@test "Verifying that update interval was set" { - run gcloud compute ssh "ilb-proxy-${RAND}" --zone us-central1-a --tunnel-through-iap \ - --command "sudo crontab -l" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - [[ "$status" -eq 0 ]] - [[ "$output" = "*/15 * * * * /sbin/haproxy-conf-updater" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "Status: $status" - echo "Output: $output" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/haproxy/tests/integration/haproxy.yaml b/dm/templates/haproxy/tests/integration/haproxy.yaml deleted file mode 100644 index b1d5d29cb94..00000000000 --- a/dm/templates/haproxy/tests/integration/haproxy.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Test of the HAProxy template usage. -# -# Variables: -# RAND: a random string used by the testing suite. 
-# - -imports: - - path: templates/instance/instance.py - name: instance.py - - path: templates/haproxy/haproxy.py - name: haproxy.py - -resources: - - name: ilb-proxy-${RAND} - type: haproxy.py - properties: - zone: us-central1-a - machineType: f1-micro - network: default - loadBalancer: - algorithm: leastconn - port: 9999 - mode: tcp - instances: - port: 8888 - refreshIntervalMin: 15 - groups: - - group-${RAND}-1 - - zones/us-central1-c/instanceGroups/group-${RAND}-2 diff --git a/dm/templates/healthcheck/README.md b/dm/templates/healthcheck/README.md deleted file mode 100644 index b5189aa3409..00000000000 --- a/dm/templates/healthcheck/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# Healthcheck - -This template creates a load balancer healthcheck. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [network](../network/README.md) -- Grant the [compute.networkAdmin](https://cloud.google.com/compute/docs/access/iam) - IAM role to the project service account - -## Deployment - -### Resources - -Depend on the specified healthcheck type. - -#### Legacy Healthchecks - -- [compute.v1.httpHealthCheck](https://cloud.google.com/sdk/gcloud/reference/compute/health-checks/create/http) -- [compute.v1.httpsHealthCheck](https://cloud.google.com/sdk/gcloud/reference/compute/health-checks/create/https) - -#### TCP + SSL Healthchecks - -- [compute.v1.healthChecks](https://cloud.google.com/load-balancing/docs/health-check-concepts) - -#### Beta Healthchecks - -- [compute.beta.healthChecks](https://cloud.google.com/sdk/gcloud/reference/beta/compute/health-checks/create/http2) - -### Properties - -See the `properties` section in the schema file(s): - -- [Healthcheck](healthcheck.py.schema) - -### Usage - -1. 
Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; - in this case, [examples/healthcheck.yaml](examples/healthcheck.yaml): - -```shell - cp templates/healthcheck/examples/healthcheck.yaml my_healthcheck.yaml -``` - -4. Change the values in the config file to match your specific GCP setup: - -```shell - vim my_healthcheck.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment: - -```shell - gcloud deployment-manager deployments create \ - --config my_healthcheck.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Healthcheck](examples/healthcheck.yaml) \ No newline at end of file diff --git a/dm/templates/healthcheck/examples/healthcheck.yaml b/dm/templates/healthcheck/examples/healthcheck.yaml deleted file mode 100644 index c44e3ee9284..00000000000 --- a/dm/templates/healthcheck/examples/healthcheck.yaml +++ /dev/null @@ -1,128 +0,0 @@ -# Example of the healthcheck template usage. - -imports: -- path: templates/healthcheck/healthcheck.py - name: healthcheck.py - -resources: -- name: my-legacy-http-healthcheck-local - type: healthcheck.py - properties: - description: My Compute Engine healthcheck. - checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - host: my-host.testing - port: 80 - healthcheckType: HTTP -- name: my-legacy-https-healthcheck-local - type: healthcheck.py - properties: - description: My Compute Engine healthcheck. 
- checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - port: 80 - healthcheckType: HTTPS -- name: my-beta-http-healthcheck-local - type: healthcheck.py - properties: - description: My Compute Engine healthcheck. - checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - host: my-host.testing - port: 80 - healthcheckType: HTTP - response: my-response - version: beta -- name: my-beta-https-healthcheck-local - type: healthcheck.py - properties: - description: My Compute Engine healthcheck. - checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - host: my-host.testing - port: 80 - healthcheckType: HTTPS - response: my-response - version: beta -- name: my-beta-http2-healthcheck-local - type: healthcheck.py - properties: - description: My Compute Engine healthcheck. - checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - port: 80 - healthcheckType: HTTP2 - version: beta -- name: my-tcp-healthcheck-local - type: healthcheck.py - properties: - description: My Compute Engine healthcheck. - checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - port: 80 - healthcheckType: TCP -- name: my-ssl-healthcheck-local - type: healthcheck.py - properties: - description: My Compute Engine healthcheck. - checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - port: 80 - healthcheckType: SSL -- name: my-beta-tcp-healthcheck - type: healthcheck.py - properties: - description: My Compute Engine beta TCP healthcheck. - checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - port: 80 - healthcheckType: TCP -- name: my-beta-ssl-healthcheck - type: healthcheck.py - properties: - description: My Compute Engine beta SSL healthcheck. 
- checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - port: 80 - healthcheckType: SSL -- name: my-requestpath-healthcheck-local - type: healthcheck.py - properties: - description: My Compute Engine healthcheck. - checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - proxyHeader: PROXY_V1 - requestPath: /health.html - healthcheckType: HTTPS -- name: my-response-healthcheck-local - type: healthcheck.py - properties: - description: My Compute Engine healthcheck. - checkIntervalSec: 5 - timeoutSec: 5 - unhealthyThreshold: 2 - healthyThreshold: 2 - proxyHeader: PROXY_V1 - healthcheckType: TCP - request: request-data - response: response-data diff --git a/dm/templates/healthcheck/healthcheck.py b/dm/templates/healthcheck/healthcheck.py deleted file mode 100644 index 9c7b5fa4bed..00000000000 --- a/dm/templates/healthcheck/healthcheck.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""This template creates an instance healthcheck.""" - - -def set_if_exists(healthcheck, properties, prop): - """ - If prop exists in properties, set the healthcheck's property to it. 
- Input: [dict] healthcheck: a dictionary representing a healthcheck object - [dict] properties: a dictionary of the user supplied values - [string] prop: the value to check if exists within properties - - """ - if prop in properties: - healthcheck[prop] = properties[prop] - -def generate_config(context): - """ Entry point for the deployment resources. """ - - resources = [] - properties = context.properties - healthcheck_name = properties.get('name', context.env['name']) - healthcheck_type = properties['healthcheckType'] - healthcheck_version = properties.get('version', 'v1') - - project_id = properties.get('project', context.env['project']) - - # Deployment Manager resource types per healthcheck type. - healthcheck_type_dictionary = { - 'HTTP-Legacy': - { - 'v1': 'gcp-types/compute-v1:httpHealthChecks', - 'beta': 'gcp-types/compute-beta:httpHealthChecks' - }, - 'HTTPS-Legacy': - { - 'v1': 'gcp-types/compute-v1:httpsHealthChecks', - 'beta': 'gcp-types/compute-beta:httpsHealthChecks' - }, - 'HTTP': - { - 'v1': 'gcp-types/compute-v1:healthChecks', - 'beta': 'gcp-types/compute-beta:healthChecks' - }, - 'HTTPS': - { - 'v1': 'gcp-types/compute-v1:healthChecks', - 'beta': 'gcp-types/compute-beta:healthChecks' - }, - 'SSL': - { - 'v1': 'gcp-types/compute-v1:healthChecks', - 'beta': 'gcp-types/compute-beta:healthChecks' - }, - 'TCP': - { - 'v1': 'gcp-types/compute-v1:healthChecks', - 'beta': 'gcp-types/compute-beta:healthChecks' - }, - 'HTTP2': { - 'beta': 'gcp-types/compute-beta:healthChecks' - } - } - - # Deployment Manager object types associated with each type of healthcheck. - healthcheck_object_dictionary = { - 'HTTP-Legacy': 'httpHealthCheck', - 'HTTPS-Legacy': 'httpsHealthCheck', - 'HTTP': 'httpHealthCheck', - 'HTTPS': 'httpsHealthCheck', - 'SSL': 'sslHealthCheck', - 'TCP': 'tcpHealthCheck', - 'HTTP2': 'http2HealthCheck' - } - - # Create a generic healthcheck object. 
- healthcheck = { - 'name': - context.env['name'], - 'type': - healthcheck_type_dictionary[healthcheck_type][healthcheck_version] - } - - # Create the generic healthcheck properties separately. - healthcheck_properties = { - 'checkIntervalSec': properties['checkIntervalSec'], - 'timeoutSec': properties['timeoutSec'], - 'unhealthyThreshold': properties['unhealthyThreshold'], - 'healthyThreshold': properties['healthyThreshold'], - 'kind': 'compute#healthCheck', - 'type': healthcheck_type, - 'project': project_id, - 'name': healthcheck_name, - } - - set_if_exists(healthcheck_properties, properties, 'description') - - # Create a specific healthcheck object. - specific_healthcheck_type = healthcheck_object_dictionary[healthcheck_type] - specific_healthcheck = { - 'proxyHeader': properties.get('proxyHeader', - 'NONE'), - } - - set_if_exists(specific_healthcheck, properties, 'port') - - # Check for beta-specific properties. - # Add them to the specific healthcheck object. - if healthcheck_version == 'beta': - for prop in ['portSpecification', 'portName', 'response']: - set_if_exists(specific_healthcheck, properties, prop) - - # Check for HTTP/S/2-specific properties. - # Add them to the generic healthcheck. - if healthcheck_type in ['HTTP', 'HTTPS', 'HTTP2']: - for prop in ['requestPath', 'host']: - set_if_exists(specific_healthcheck, properties, prop) - elif healthcheck_type in ['HTTP-Legacy' ,'HTTPS-Legacy']: - for prop in ['requestPath', 'host']: - set_if_exists(healthcheck_properties, properties, prop) - - # Check for TCP/SSL-specific properties. - # Add them to the specific healthcheck object. 
- if healthcheck_type in ['TCP', 'SSL']: - for prop in ['request', 'response']: - set_if_exists(specific_healthcheck, properties, prop) - - healthcheck_properties[specific_healthcheck_type] = specific_healthcheck - healthcheck['properties'] = healthcheck_properties - resources.append(healthcheck) - - outputs = [ - { - 'name': 'name', - 'value': '$(ref.{}.name)'.format(context.env['name']) - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - }, - { - 'name': 'creationTimestamp', - 'value': '$(ref.{}.creationTimestamp)'.format(context.env['name']) - } - ] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/healthcheck/healthcheck.py.schema b/dm/templates/healthcheck/healthcheck.py.schema deleted file mode 100644 index 447f308d560..00000000000 --- a/dm/templates/healthcheck/healthcheck.py.schema +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Healthcheck - author: Sourced Group Inc. - version: 1.1.0 - description: | - Creates a Healthcheck resource. - - For more information on this resource: - https://cloud.google.com/load-balancing/docs/health-checks. 
- - APIs endpoints used by this template: - - gcp-types/compute-v1:httpHealthChecks => - https://cloud.google.com/compute/docs/reference/rest/v1/httpHealthChecks - - gcp-types/compute-v1:httpsHealthChecks => - https://cloud.google.com/compute/docs/reference/rest/v1/httpsHealthChecks - - gcp-types/compute-v1:healthChecks => - https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks - - gcp-types/compute-beta:httpHealthChecks => - https://cloud.google.com/compute/docs/reference/rest/beta/httpHealthChecks - - gcp-types/compute-beta:httpsHealthChecks => - https://cloud.google.com/compute/docs/reference/rest/beta/httpsHealthChecks - - gcp-types/compute-beta:healthChecks => - https://cloud.google.com/compute/docs/reference/rest/beta/healthChecks - -additionalProperties: false - -required: - - healthcheckType - -properties: - name: - type: string - description: | - Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, - and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular - expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all - following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - project: - type: string - description: | - The project ID of the project containing the Cloud Router instance. The - Google apps domain is prefixed if applicable. - description: - type: string - description: | - An optional description of this resource. Provide this property when you create the resource. - checkIntervalSec: - type: integer - default: 5 - description: Defines how often (in seconds) to perform a healthcheck. - timeoutSec: - type: integer - default: 5 - description: | - Defines how long (in seconds) to wait before declaring failure. - The timeoutSec value must be <= checkIntervalSec. 
- unhealthyThreshold: - type: integer - default: 2 - description: | - The number of consecutive failures after which a formerly - healthy instance is marked unhealthy. - healthyThreshold: - type: integer - default: 2 - description: | - The number of consecutive successes after which a formerly unhealthy - instance is marked healthy. - host: - type: string - description: | - The value of the host header in the HTTP health check request. - The default value is the public IP of the host being checked. - port: - type: integer - minimum: 1 - maximum: 65535 - description: | - The TCP port number for the health check request. - The default value depends on the healthcheck type: 80 for HTTP/2 + TCP; - 443 for HTTPS + SSL. - healthcheckType: - type: string - description: The healthcheck type. - enum: - - HTTP - - HTTP-Legacy - - HTTPS - - HTTPS-Legacy - - HTTP2 - - SSL - - TCP - version: - type: string - default: v1 - description: | - The healthcheck version to use: Beta or V1. - All healthcheck types, except for HTTP2, support both options. - HTTP2 supports only Beta. - enum: - - beta - - v1 - proxyHeader: - type: string - default: NONE - description: | - The type of the proxy header to append before sending data to the back - end. - enum: - - PROXY_V1 - - NONE - requestPath: - type: string - default: / - description: | - The request path of the HTTP health check request. - Can only be used with HTTP + HTTPS + HTTP2 healthchecks. - request: - type: string - description: | - The application data to send once the SSL connection has been - established (the default value is empty). If both request and response - are empty, the connection establishment alone indicates health. - The request data can only be ASCII - Note: used only with TCP + SSL. - response: - type: string - description: | - The bytes to match against the beginning of the response data. - If left empty (the default value), any response will indicate health. - The response data can only be ASCII. 
- Note: Used only with TCP + SSL except in beta. In beta, response is - available to all healthcheck types. - portName: - type: string - description: | - The port name as defined in InstanceGroup#NamedPort#name. If both port - and port_name are defined, port takes precedence. - Note: Used only in beta. - portSpecification: - type: string - description: | - Defines how port is selected for health checking. One of the - following values: - USE_FIXED_PORT: The port number in {port} is used for health checking. - USE_NAMED_PORT: The {portName} is used for health checking. - USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. - For other backends, the port or named port specified in the Backend - Service is used for health checking. - If not specified, healthcheck follows behavior specified in the {port} - and {portName} fields. - Used only in beta for all types. - enum: - - USE_FIXED_PORT - - USE_NAMED_PORT - - USE_SERVING_PORT - -outputs: - name: - type: string - description: The HealthCheck resource name. - selfLink: - type: string - description: The Deployment Manager-defined URL for the resource. - creationTimestamp: - type: datetime - description: | - The resource creation timestamp in the RFC3339 text format. 
- -documentation: - - templates/healthcheck/README.md - -examples: - - templates/healthcheck/examples/healthcheck.yaml diff --git a/dm/templates/healthcheck/tests/.gitignore b/dm/templates/healthcheck/tests/.gitignore deleted file mode 100644 index 51b4cfda113..00000000000 --- a/dm/templates/healthcheck/tests/.gitignore +++ /dev/null @@ -1 +0,0 @@ -local/ diff --git a/dm/templates/healthcheck/tests/integration/cloudbuild-schema.yaml b/dm/templates/healthcheck/tests/integration/cloudbuild-schema.yaml deleted file mode 100644 index 6f3bd22cd91..00000000000 --- a/dm/templates/healthcheck/tests/integration/cloudbuild-schema.yaml +++ /dev/null @@ -1,4 +0,0 @@ -steps: -- name: gcr.io/$PROJECT_ID/cft-schema - args: ['./templates/healthcheck/examples/healthcheck.yaml'] -tags: ['cft-dm-schema-runner'] diff --git a/dm/templates/healthcheck/tests/integration/healthcheck.bats b/dm/templates/healthcheck/tests/integration/healthcheck.bats deleted file mode 100644 index 5c44c574805..00000000000 --- a/dm/templates/healthcheck/tests/integration/healthcheck.bats +++ /dev/null @@ -1,308 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export RESOURCE_NAME_PREFIX="test-healthcheck-${RAND}" - export CHECK_INTERVAL_SEC="5" - export TIMEOUT_SEC="5" - export UNHEALTHY_THRESHOLD="2" - export HEALTHY_THRESHOLD="2" - export PORT_80="80" - export PORT_443="443" -fi -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/healthcheck/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f ${RANDOM_FILE} - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" -} - -@test "Legacy HTTP healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-legacy-http - run gcloud compute http-health-checks describe ${RESOURCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_80}" ]] -} - -@test "Legacy HTTPS healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-legacy-https - run gcloud compute https-health-checks describe ${RESOURCE_NAME}\ - 
--project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_443}" ]] -} - -@test "TCP healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-tcp - run gcloud compute health-checks describe ${RESOURCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_80}" ]] - [[ "$output" =~ "type: TCP" ]] -} - -@test "SSL healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-ssl - run gcloud compute health-checks describe ${RESOURCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_80}" ]] - [[ "$output" =~ "type: SSL" ]] -} - -@test "Request path legacy healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-requestpath-legacy-https - run gcloud compute https-health-checks describe ${RESOURCE_NAME}\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: 
${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "requestPath: /health.html" ]] - [[ "$output" =~ "port: ${PORT_443}" ]] -} - -@test "Request path healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-requestpath-https - run gcloud compute health-checks describe ${RESOURCE_NAME}\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "requestPath: /health.html" ]] - [[ "$output" =~ "port: ${PORT_443}" ]] -} - -@test "TCP w/ request/response data was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-response-tcp - run gcloud compute health-checks describe ${RESOURCE_NAME}\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_80}" ]] - [[ "$output" =~ "type: TCP" ]] - [[ "$output" =~ "request: request-data" ]] - [[ "$output" =~ "response: response-data" ]] -} - -@test "Legacy HTTP beta healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-legacy-beta-http - run gcloud beta compute http-health-checks describe ${RESOURCE_NAME}\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ 
"$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_80}" ]] -} - -@test "Legacy HTTPS beta healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-legacy-beta-https - run gcloud beta compute https-health-checks describe ${RESOURCE_NAME}\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_443}" ]] -} - -@test "HTTPS beta http2 healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-beta-http2 - run gcloud beta compute health-checks describe --global ${RESOURCE_NAME}\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_80}" ]] -} - -@test "TCP beta healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-beta-tcp - run gcloud beta compute health-checks describe --global ${RESOURCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_80}" ]] - [[ "$output" =~ "type: TCP" ]] -} - -@test "SSL beta healthcheck 
was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-beta-ssl - run gcloud beta compute health-checks describe --global ${RESOURCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_80}" ]] - [[ "$output" =~ "type: SSL" ]] -} - -@test "HTTP healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-http - run gcloud compute health-checks describe ${RESOURCE_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_80}" ]] -} - -@test "Legacy HTTPS healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-https - run gcloud compute health-checks describe ${RESOURCE_NAME}\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_443}" ]] -} - -@test "HTTP beta healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-beta-http - run gcloud beta compute health-checks describe --global ${RESOURCE_NAME}\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" 
-eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_80}" ]] -} - -@test "HTTPS beta healthcheck was created" { - RESOURCE_NAME=${RESOURCE_NAME_PREFIX}-beta-https - run gcloud beta compute health-checks describe --global ${RESOURCE_NAME}\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "checkIntervalSec: ${CHECK_INTERVAL_SEC}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT_SEC}" ]] - [[ "$output" =~ "unhealthyThreshold: ${UNHEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "healthyThreshold: ${HEALTHY_THRESHOLD}" ]] - [[ "$output" =~ "port: ${PORT_443}" ]] -} -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/healthcheck/tests/integration/healthcheck.yaml b/dm/templates/healthcheck/tests/integration/healthcheck.yaml deleted file mode 100644 index e31fc8b31c5..00000000000 --- a/dm/templates/healthcheck/tests/integration/healthcheck.yaml +++ /dev/null @@ -1,189 +0,0 @@ -# Test of the healthcheck template. - -imports: -- path: templates/healthcheck/healthcheck.py - name: healthcheck.py - - -resources: -- name: ${RESOURCE_NAME_PREFIX}-legacy-http - type: healthcheck.py - properties: - description: My Compute Engine legacy HTTP healthcheck. 
- checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - host: host.testing - port: ${PORT_80} - healthcheckType: HTTP-Legacy -- name: ${RESOURCE_NAME_PREFIX}-legacy-https - type: healthcheck.py - properties: - description: My Compute Engine legacy HTTPS healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - port: ${PORT_443} - healthcheckType: HTTPS-Legacy -- name: ${RESOURCE_NAME_PREFIX}-legacy-beta-http - type: healthcheck.py - properties: - description: My Compute Engine beta HTTP healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - host: test-host.testing - port: ${PORT_80} - healthcheckType: HTTP-Legacy - response: test-response - version: beta -- name: ${RESOURCE_NAME_PREFIX}-legacy-beta-https - type: healthcheck.py - properties: - description: My Compute Engine beta HTTPS healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - host: test-host.testing - port: ${PORT_443} - healthcheckType: HTTPS-Legacy - response: test-response - version: beta -- name: ${RESOURCE_NAME_PREFIX}-beta-http2 - type: healthcheck.py - properties: - description: My Compute Engine HTTP2 healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - port: ${PORT_80} - healthcheckType: HTTP2 - version: beta -- name: ${RESOURCE_NAME_PREFIX}-tcp - type: healthcheck.py - properties: - description: My Compute Engine TCP healthcheck. 
- checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - port: ${PORT_80} - healthcheckType: TCP -- name: ${RESOURCE_NAME_PREFIX}-ssl - type: healthcheck.py - properties: - description: My Compute Engine SSL healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - port: ${PORT_80} - healthcheckType: SSL -- name: ${RESOURCE_NAME_PREFIX}-beta-tcp - type: healthcheck.py - properties: - description: My Compute Engine beta TCP healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - port: ${PORT_80} - healthcheckType: TCP -- name: ${RESOURCE_NAME_PREFIX}-beta-ssl - type: healthcheck.py - properties: - description: My Compute Engine beta SSL healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - port: ${PORT_80} - healthcheckType: SSL -- name: ${RESOURCE_NAME_PREFIX}-requestpath-legacy-https - type: healthcheck.py - properties: - description: My Compute Engine requestpath healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - proxyHeader: PROXY_V1 - requestPath: /health.html - healthcheckType: HTTPS-Legacy -- name: ${RESOURCE_NAME_PREFIX}-requestpath-https - type: healthcheck.py - properties: - description: My Compute Engine requestpath healthcheck. 
- checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - proxyHeader: PROXY_V1 - requestPath: /health.html - healthcheckType: HTTPS - port: ${PORT_443} -- name: ${RESOURCE_NAME_PREFIX}-response-tcp - type: healthcheck.py - properties: - description: My Compute Engine response healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - proxyHeader: PROXY_V1 - port: ${PORT_80}80 - healthcheckType: TCP - request: request-data - response: response-data -- name: ${RESOURCE_NAME_PREFIX}-http - type: healthcheck.py - properties: - description: My Compute Engine legacy HTTP healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - host: host.testing - port: ${PORT_80} - healthcheckType: HTTP -- name: ${RESOURCE_NAME_PREFIX}-https - type: healthcheck.py - properties: - description: My Compute Engine legacy HTTPS healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - port: ${PORT_443} - healthcheckType: HTTPS -- name: ${RESOURCE_NAME_PREFIX}-beta-http - type: healthcheck.py - properties: - description: My Compute Engine beta HTTP healthcheck. - checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - host: test-host.testing - port: ${PORT_80} - healthcheckType: HTTP - response: test-response - version: beta -- name: ${RESOURCE_NAME_PREFIX}-beta-https - type: healthcheck.py - properties: - description: My Compute Engine beta HTTPS healthcheck. 
- checkIntervalSec: ${CHECK_INTERVAL_SEC} - timeoutSec: ${TIMEOUT_SEC} - unhealthyThreshold: ${UNHEALTHY_THRESHOLD} - healthyThreshold: ${HEALTHY_THRESHOLD} - host: test-host.testing - port: ${PORT_443} - healthcheckType: HTTPS - response: test-response - version: beta diff --git a/dm/templates/iam_custom_role/README.md b/dm/templates/iam_custom_role/README.md deleted file mode 100644 index 80bb3eedf37..00000000000 --- a/dm/templates/iam_custom_role/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Custom IAM Role - -This template creates a custom IAM role for an organization or a project. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [iam.roleAdmin, iam.organizationRoleAdmin or owner](https://cloud.google.com/iam/docs/understanding-custom-roles#required_permissions_and_roles) IAM role to the project service account - -## Deployment - -### Resources - -- [Creating custom IAM roles](https://cloud.google.com/iam/docs/creating-custom-roles) -- [gcp-types/iam-v1:organizations.roles](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/create) -- [gcp-types/iam-v1:projects.roles](https://cloud.google.com/iam/reference/rest/v1/projects.roles/create) - -### Properties - -See `properties` section in the schema file(s): - -- [Organization](organization_custom_role.py.schema) -- [Project](project_custom_role.py.schema) - - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. 
Copy the example DM config to be used as a model for the deployment; in this case, [examples/iam\_custom\_role.yaml](examples/iam_custom_role.yaml): - -```shell - cp templates/iam_custom_role/examples/iam_custom_role.yaml my_iamcustomrole.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_iamcustomrole.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_iamcustomrole.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Custom IAM role](examples/iam_custom_role.yaml) diff --git a/dm/templates/iam_custom_role/examples/iam_custom_role.yaml b/dm/templates/iam_custom_role/examples/iam_custom_role.yaml deleted file mode 100644 index 750fdc3c2ed..00000000000 --- a/dm/templates/iam_custom_role/examples/iam_custom_role.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Example of the IAM custom role template usage. 
-# -# In this example, two custom roles are created: -# 1- a custom organizational role (test-custom-org-role) -# 2- a custom project role (test-project-role) -# -# Replace the following placeholders with valid values: -# : the organization id -# : a unique ID for the custom organization role -# : a unique ID for the custom project role -# - -imports: - - path: templates/iam_custom_role/project_custom_role.py - name: project_custom_role.py - - path: templates/iam_custom_role/organization_custom_role.py - name: organization_custom_role.py - -resources: - - name: example-custom-org-role - type: organization_custom_role.py - properties: - orgId: - roleId: - title: My Org Role Title - description: My Org Role Description - includedPermissions: - - iam.roles.get - - iam.roles.list - - name: example-project-custom-role - type: project_custom_role.py - properties: - roleId: - title: My Project Role Title - description: My Project Role Description - includedPermissions: - - iam.roles.get - - iam.roles.list diff --git a/dm/templates/iam_custom_role/organization_custom_role.py b/dm/templates/iam_custom_role/organization_custom_role.py deleted file mode 100644 index 3062da90962..00000000000 --- a/dm/templates/iam_custom_role/organization_custom_role.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""This template creates a custom IAM Organization role.""" - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - org_id = str(properties['orgId']) - included_permissions = properties['includedPermissions'] - - role = { - 'includedPermissions': included_permissions, - # Default the stage to General Availability. - 'stage': properties.get('stage') - } - - title = properties.get('title') - if title: - role['title'] = title - - description = properties.get('description') - if description: - role['description'] = description - - resources = [ - { - 'name': context.env['name'], - # https://cloud.google.com/iam/reference/rest/v1/organizations.roles - 'type': 'gcp-types/iam-v1:organizations.roles', - 'properties': - { - 'parent': 'organizations/' + org_id, - 'roleId': properties['roleId'], - 'role': role - } - } - ] - - return {'resources': resources} diff --git a/dm/templates/iam_custom_role/organization_custom_role.py.schema b/dm/templates/iam_custom_role/organization_custom_role.py.schema deleted file mode 100644 index 0b23b2a56f0..00000000000 --- a/dm/templates/iam_custom_role/organization_custom_role.py.schema +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Custom IAM Role (Organization Level) - author: Sourced Group Inc. 
- version: 1.0.0 - description: | - Creates a custom organization-level IAM role under the specified organization ID. - For more information on this resource: - https://cloud.google.com/iam/docs/understanding-custom-roles. - - To deploy custom organization-level roles, your project needs the Identity - and Access Management (IAM) API to be enabled. You may also need to grant - additional roles to the default Google APIs service account. - See [Best Practices](https://cloud.google.com/deployment-manager/docs/best-practices/#permissions) for more information. - - After a role has been deleted, its bindings remain, in the inactive status. - The role can be undeleted within 7 days of deletion. After 7 days, the role is picked by a - permanent deletion process that lasts 30 days. During this process, the - role and all its bindings are permanently removed, and you cannot create a new role - with the same role ID. After the role has been permanently deleted -- which - takes 37 days from the initial deletion request -- a new role can be created using - the deleted role's ID. - - APIs endpoints used by this template: - - gcp-types/iam-v1:organizations.roles => - https://cloud.google.com/iam/reference/rest/v1/organizations.roles - -additionalProperties: false - -required: - - orgId - - roleId - - includedPermissions - -properties: - orgId: - type: string - pattern: ^[0-9]{8,25}$ - description: | - The ID of the organization that the projects will be under. Typically a - base-10 string representation of integer. - roleId: - type: string - pattern: ^[a-zA-Z][0-9a-zA-Z]{7,63}$ - description: | - A unique ID of the custom role. - title: - type: string - description: | - The title of the custom role. - description: - type: string - description: | - Description of the custom role. - stage: - type: string - description: | - The current launch stage of the role. 
If the ALPHA launch stage has been selected for a role, - the stage field will not be included in the returned definition for the role. - default: GA - enum: - - ALPHA - - BETA - - GA - - DEPRECATED - - DISABLED - - EAP - includedPermissions: - type: array - uniqueItems: true - default: [] - description: | - Permissions that the custom role includes. - -documentation: - - templates/iam_custom_role/README.md - -examples: - - templates/iam_custom_role/examples/iam_custom_role.yaml diff --git a/dm/templates/iam_custom_role/project_custom_role.py b/dm/templates/iam_custom_role/project_custom_role.py deleted file mode 100644 index 4a75f2f8b2f..00000000000 --- a/dm/templates/iam_custom_role/project_custom_role.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""This template creates a custom IAM Project role.""" - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - included_permissions = properties['includedPermissions'] - project_id = properties.get('project', context.env['project']) - - role = { - 'includedPermissions': included_permissions, - # Default the stage to General Availability. 
- 'stage': properties.get('stage') - } - - title = properties.get('title') - if title: - role['title'] = title - - description = properties.get('description') - if description: - role['description'] = description - - resources = [ - { - 'name': context.env['name'], - # https://cloud.google.com/iam/reference/rest/v1/projects.roles - 'type': 'gcp-types/iam-v1:projects.roles', - 'properties': - { - 'parent': 'projects/' + project_id, - 'roleId': properties['roleId'], - 'role': role - } - } - ] - - return {'resources': resources} diff --git a/dm/templates/iam_custom_role/project_custom_role.py.schema b/dm/templates/iam_custom_role/project_custom_role.py.schema deleted file mode 100644 index cabcde2037b..00000000000 --- a/dm/templates/iam_custom_role/project_custom_role.py.schema +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Custom IAM Role (Project Level) - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates a custom project-level IAM role under the specified organization ID. - For more information on this resource: - https://cloud.google.com/iam/reference/rest/v1/projects.roles/create - - To deploy custom project-level roles, your project needs the Identity - and Access Management (IAM) API to be enabled. You may also need to grant - additional roles to the default Google APIs service account. 
- See [Best Practices](https://cloud.google.com/deployment-manager/docs/best-practices/#permissions) for more information. - - After a role has been deleted, its bindings remain, in the inactive status. - The role can be undeleted within 7 days of deletion. After 7 days, the role is picked by a - permanent deletion process that lasts 30 days. During this process, the - role and all its bindings are permanently removed, and you cannot create a new role - with the same role ID. After the role has been permanently deleted -- which - takes 37 days from the initial deletion request -- a new role can be created using - the deleted role's ID. - - APIs endpoints used by this template: - - gcp-types/iam-v1:projects.roles => - https://cloud.google.com/iam/reference/rest/v1/projects.roles - -additionalProperties: false - -required: - - roleId - - includedPermissions - -properties: - project: - type: string - description: | - The project ID of the project to modify. - roleId: - type: string - pattern: ^[a-zA-Z][0-9a-zA-Z]{7,63}$ - description: | - A unique ID of the custom role. - title: - type: string - description: | - The title of the custom role. - description: - type: string - description: | - Description of the custom role. - stage: - type: string - description: | - The current launch stage of the role. If the ALPHA launch stage has been selected for a role, - the stage field will not be included in the returned definition for the role. - default: GA - enum: - - ALPHA - - BETA - - GA - - DEPRECATED - - DISABLED - - EAP - includedPermissions: - type: array - uniqueItems: true - default: [] - description: | - Permissions that the custom role includes. 
- -documentation: - - templates/iam_custom_role/README.md - -examples: - - templates/iam_custom_role/examples/iam_custom_role.yaml diff --git a/dm/templates/iam_custom_role/tests/integration/iam_custom_role.bats b/dm/templates/iam_custom_role/tests/integration/iam_custom_role.bats deleted file mode 100644 index 5a6928501d6..00000000000 --- a/dm/templates/iam_custom_role/tests/integration/iam_custom_role.bats +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/iam_custom_role/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f ${RANDOM_FILE} - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that project iam roles were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud iam roles list --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="name:myCustomProjectRole${RAND}" - [ "$status" -eq 0 ] - [[ "$output" =~ "description: My Project Role Description" ]] - [[ "$output" =~ "name: projects/${CLOUD_FOUNDATION_PROJECT_ID}/roles/myCustomProjectRole${RAND}" ]] - [[ "$output" =~ "stage: GA" ]] - [[ "$output" =~ "title: My Project Role Title" ]] -} - -@test "Verifying that organizational iam roles were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud iam roles list --organization "${CLOUD_FOUNDATION_ORGANIZATION_ID}" --filter="name:myCustomOrgRole${RAND}" - [ "$status" -eq 0 ] - [[ "$output" =~ "description: My Org Role Description" ]] - [[ "$output" =~ "name: organizations/${CLOUD_FOUNDATION_ORGANIZATION_ID}/roles/myCustomOrgRole${RAND}" ]] - [[ "$output" =~ "stage: GA" ]] - [[ "$output" =~ "title: My Org Role Title" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q -} diff --git a/dm/templates/iam_custom_role/tests/integration/iam_custom_role.yaml b/dm/templates/iam_custom_role/tests/integration/iam_custom_role.yaml deleted file mode 100644 index dec630790f7..00000000000 --- a/dm/templates/iam_custom_role/tests/integration/iam_custom_role.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Test of the IAM custom role template. 
-# -# Variables: -# CLOUD_FOUNDATION_ORGANIZATION_ID: TYour organization's ID -# RAND: A random string used by the testing suite - -imports: - - path: templates/iam_custom_role/project_custom_role.py - name: project_custom_role.py - - path: templates/iam_custom_role/organization_custom_role.py - name: organization_custom_role.py - -resources: - - name: test-custom-org-role-${RAND} - type: organization_custom_role.py - properties: - orgId: "${CLOUD_FOUNDATION_ORGANIZATION_ID}" - roleId: myCustomOrgRole${RAND} - title: My Org Role Title - description: My Org Role Description - includedPermissions: - - iam.roles.get - - iam.roles.list - - name: project-custom-role-${RAND} - type: project_custom_role.py - properties: - roleId: myCustomProjectRole${RAND} - title: My Project Role Title - description: My Project Role Description - includedPermissions: - - iam.roles.get - - iam.roles.list diff --git a/dm/templates/iam_member/README.md b/dm/templates/iam_member/README.md deleted file mode 100644 index 9a3997a6566..00000000000 --- a/dm/templates/iam_member/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# IAM Member - -This template grants IAM roles for a projects, folders and organizations. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) - -### Grant the appropriate IAM permissions depending on your usecase -Grant the [owner](https://cloud.google.com/iam/docs/understanding-roles) IAM role on the project to the *DM Service Account* to grant roles within the project. This allows DM to set IAM on the Project or on the resource level. 
- -For more restrictive permissions grant the appropriate resource level admin permission: - -- Grant the [resourcemanager.projectIamAdmin](https://cloud.google.com/iam/docs/understanding-roles) IAM role on the project to the *DM Service Account* to grant roles within the project -- Grant the [roles/resourcemanager.folderIamAdmin](https://cloud.google.com/iam/docs/understanding-roles) IAM role on the folder to the *DM Service Account* to grant roles within the folder -- Grant the [roles/iam.securityAdmin](https://cloud.google.com/iam/docs/understanding-roles) IAM role on the organization to the *DM Service Account* to grant roles within the organization and all nested resources -- Etc. - -## Development - -### Resources - -Resources are created based on the input properties: -- [cloudresourcemanager-v1:virtual.projects.iamMemberBinding](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/google/resource-snippets/cloudresourcemanager-v1/policies.jinja) - - This virtual endpoint implements projects.getIamPolicy and projects.setIamPolicy internally with proper concurancy handling. -- [cloudresourcemanager-v2:virtual.folders.iamMemberBinding](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/google/resource-snippets/cloudresourcemanager-v2/policies.jinja) -- [cloudresourcemanager-v1:virtual.organizations.iamMemberBinding](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/google/resource-snippets/cloudresourcemanager-v1/policies.jinja) -- storage-v1:virtual.buckets.iamMemberBinding -- cloudfunctions-v1:virtual.projects.locations.functions.iamMemberBinding - -### Properties - -See `properties` section in the schema file(s): - -- [IAM Member](iam_member.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. 
Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/iam_member.yaml](examples/iam_member.yaml): - -```shell - cp templates/iam_member/examples/iam_member.yaml my_iammember.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_iammember.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_iammember.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [IAM member](examples/iam_member.yaml) diff --git a/dm/templates/iam_member/examples/iam_member.yaml b/dm/templates/iam_member/examples/iam_member.yaml deleted file mode 100644 index 58ffa31fda5..00000000000 --- a/dm/templates/iam_member/examples/iam_member.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Example of the IAM member template usage. -# -# The `members` property is a list of members to which the -# `role` is applied. -# -# Replace `user-email` with a valid email. -# Replace `service-account` with a valid service account. -# Replace `group-address` with a valid group. -# Replace `domain-name` with a valid domain. -# Replace `folderId` with a folder ID to assign roles to. 
- - -imports: - - path: templates/iam_member/iam_member.py - name: iam_member.py - -resources: - - name: iam-member-project - type: iam_member.py - properties: - roles: - - role: roles/editor - members: - - user: - - serviceAccount: - - role: roles/viewer - members: - - group: - - domain: - - - name: iam-member-folder - type: iam_member.py - properties: - folderId: "" - roles: - - role: roles/editor - members: - - user: - - serviceAccount: - - role: roles/viewer - members: - - group: - - domain: diff --git a/dm/templates/iam_member/examples/iam_member_resource.yaml b/dm/templates/iam_member/examples/iam_member_resource.yaml deleted file mode 100644 index bd92c266547..00000000000 --- a/dm/templates/iam_member/examples/iam_member_resource.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Example of the IAM member template usage. -# -# The `members` property is a list of members to which the -# `role` is applied. -# -# Replace `user-email` with a valid email. -# Replace `service-account` with a valid service account. -# Replace `group-address` with a valid group. -# Replace `domain-name` with a valid domain. -# Replace `folderId` with a folder ID to assign roles to. -# Replave `bucket-name-example` with a valid name of a GCS Bucket. -# Replave `function-name-example` with a valid name of a CloudFunction. 
- - -imports: - - path: templates/iam_member/iam_member.py - name: iam_member.py - -resources: - - name: iam-member-bucket - type: iam_member.py - properties: - roles: - - role: roles/storage.objectAdmin - bucket: - members: - - user: - - serviceAccount: - - role: roles/storage.objectViewer - bucket: - members: - - group: - - domain: - - name: iam-member-function - type: iam_member.py - properties: - roles: - - role: roles/cloudfunctions.developer - cloudFunction: - members: - - user: - - serviceAccount: - - role: roles/cloudfunctions.invoker - cloudFunction: - members: - - group: - - domain: diff --git a/dm/templates/iam_member/iam_member.py b/dm/templates/iam_member/iam_member.py deleted file mode 100644 index 4ab378292e2..00000000000 --- a/dm/templates/iam_member/iam_member.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates an IAM policy member. 
""" - -from hashlib import sha1 - -mapper = { - 'organizationId': { - 'dm_type': 'gcp-types/cloudresourcemanager-v1:virtual.organizations.iamMemberBinding', - 'dm_resource_property': 'resource', - 'postfix': 'organization'}, - 'folderId': { - 'dm_type': 'gcp-types/cloudresourcemanager-v2:virtual.folders.iamMemberBinding', - 'dm_resource_property': 'resource', - 'postfix': 'folder'}, - 'projectId': { - 'dm_type': 'gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding', - 'dm_resource_property': 'resource', - 'postfix': 'project'}, - 'bucket': { - 'dm_type': 'gcp-types/storage-v1:virtual.buckets.iamMemberBinding', - 'dm_resource_property': 'bucket', - 'postfix': 'bucket'}, - 'cloudFunction': { - 'dm_type': 'gcp-types/cloudfunctions-v1:virtual.projects.locations.functions.iamMemberBinding', - 'dm_resource_property': 'resource', - 'postfix': 'cf'} -} - - -def get_type(context): - for resource_type, resource_value in mapper.items(): - if resource_type in context.properties: - resource_value.update({'id': context.properties[resource_type]}) - return resource_value - - # If nothing specified the default is projectID from context - mapper['projectId'].update({'id': context.env['project']}) - return mapper['projectId'] - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - - base_resource = get_type(context) - - resources = [] - - if 'dependsOn' in properties: - dependson = {'metadata': {'dependsOn': properties['dependsOn']}} - dependson_root = properties['dependsOn'] - else: - dependson = {} - dependson_root = [] - - for role in properties['roles']: - for member in role['members']: - suffix = sha1( - '{}-{}'.format(role['role'], member).encode('utf-8')).hexdigest()[:10] - policy_get_name = '{}-{}'.format(context.env['name'], suffix) - - resource_name = '{}-{}'.format(policy_get_name, - base_resource['postfix']) - iam_resource = { - 'name': resource_name, - # TODO - Virtual type documentation needed - 'type': base_resource['dm_type'], - 'properties': { - base_resource['dm_resource_property']: base_resource['id'], - 'role': role['role'], - 'member': member, - } - } - iam_resource.update(dependson) - resources.append(iam_resource) - - dependson = {'metadata': {'dependsOn': [ - resource_name] + dependson_root}} - - return {"resources": resources} diff --git a/dm/templates/iam_member/iam_member.py.schema b/dm/templates/iam_member/iam_member.py.schema deleted file mode 100644 index 9f4c82a990b..00000000000 --- a/dm/templates/iam_member/iam_member.py.schema +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: IAM policy member - author: Sourced Group Inc. 
- version: 1.3.0 - description: | - Manages an IAM policy member - - For more information on this resource: - https://cloud.google.com/iam/docs/overview - - APIs endpoints used by this template: - - gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding => - TODO - Virtual type documentation needed - - gcp-types/cloudresourcemanager-v2:virtual.folders.iamMemberBinding => - TODO - Virtual type documentation needed - - gcp-types/cloudresourcemanager-v1:virtual.organizations.iamMemberBinding => - TODO - Virtual type documentation needed - - gcp-types/gcp-types/storage-v1:virtual.buckets.iamMemberBinding => - TODO - Virtual type documentation needed - - gcp-types/cloudfunctions-v1:virtual.projects.locations.functions.iamMemberBinding => - TODO - Virtual type documentation needed - -additionalProperties: false - -required: - - roles - -oneOf: - - required: - - folderId - - required: - - organizationId - - required: - - projectId - - required: - - bucket - - required: - - cloudFunction - - allOf: - - not: - required: - - folderId - - not: - required: - - organizationId - - not: - required: - - projectId - -properties: - folderId: - type: string - description: | - Folder ID to assign members to. - organizationId: - type: string - description: | - Organization ID to assign members to. - projectId: - type: string - description: | - Overwrite of project ID in case IAM bindings are referencing to - a different project or if you need to assign members to folders/organizations as well. - bucket: - type: string - description: | - GCS bucket to assign members to. - cloudFunction: - type: string - description: | - CloudFunction to assign members to. - roles: - type: array - uniqueItems: true - minItems: 1 - description: | - An array of roles and members. - items: - type: object - additionalProperties: false - properties: - role: - type: string - description: | - The role to grant to members. - members: - type: array - description: | - A list of identities. 
- items: - type: string - description: | - Specifies the identity requesting access to a Cloud Platform - resource. Can have the following values: - - user:{emailid} - An email address that represents a specific - IAM User account. For example, user:name@example.com - - serviceAccount:{emailid} - An email address that represents a - Service Account. For example, - serviceAccount:my-other-app@appspot.gserviceaccount.com - - group:{emailid} - An email address that represents a Google group. - For example, group:admins@example.com - - domain:{domain} - A Cloud Identity or G Suite domain name that - represents all the users of that domain. For example, acme.com - or example.com. - dependsOn: - type: array - description: | - The list of the resources that must be created before this template is applied. - items: - type: string - description: The resource name. - -documentation: - - templates/iam_member/README.md - -examples: - - templates/iam_member/examples/iam_member.yaml diff --git a/dm/templates/iam_member/tests/integration/iam_35_members.bats b/dm/templates/iam_member/tests/integration/iam_35_members.bats deleted file mode 100755 index abff65e6669..00000000000 --- a/dm/templates/iam_member/tests/integration/iam_35_members.bats +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -export TEST_SERVICE_ACCOUNT="test-iam-sa-${RAND}" - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. 
-if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/iam_member/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that roles were assigned to project in deployment ${DEPLOYMENT_NAME}" { - run gcloud projects get-iam-policy "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --flatten="bindings[].members" \ - --format='table(bindings.role)' \ - --filter="bindings.members~${TEST_SERVICE_ACCOUNT}-.*@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" - [[ "$output" =~ "roles/editor" ]] -} - -@test "Verifying that roles were assigned to folder in deployment ${DEPLOYMENT_NAME}" { - # Get the test folder ID and make it available. 
- if [ -z "${TEST_ORG_FOLDER_NAME}" ] - then - export TEST_ORG_FOLDER_NAME=$(gcloud alpha resource-manager folders list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --organization "${CLOUD_FOUNDATION_ORGANIZATION_ID}" | \ - grep "org-folder-${RAND}" | awk '{print $3}') - fi - run gcloud alpha resource-manager folders get-iam-policy "folders/${TEST_ORG_FOLDER_NAME}" \ - --flatten="bindings[].members" \ - --format='table(bindings.role)' \ - --filter="bindings.members~${TEST_SERVICE_ACCOUNT}-.*@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" - - [[ "$output" =~ "roles/viewer" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - run gcloud projects get-iam-policy "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --flatten="bindings[].members" \ - --format='table(bindings.role)' \ - --filter="bindings.members~${TEST_SERVICE_ACCOUNT}-.*@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" - [[ ! "$output" =~ "roles/editor" ]] - [[ ! "$output" =~ "roles/viewer" ]] -} diff --git a/dm/templates/iam_member/tests/integration/iam_35_members.yaml b/dm/templates/iam_member/tests/integration/iam_35_members.yaml deleted file mode 100644 index 0ff9724530c..00000000000 --- a/dm/templates/iam_member/tests/integration/iam_35_members.yaml +++ /dev/null @@ -1,313 +0,0 @@ -# Test of the IAM member template . 
-# -# Variables: -# RAND: A random string used by the testing suite -# TEST_SERVICE_ACCOUNT: Service account name -# CLOUD_FOUNDATION_PROJECT_ID: Project ID - -imports: - - path: templates/iam_member/iam_member.py - name: iam_member.py - -resources: - - name: iam-member-test-project-${RAND} - type: iam_member.py - properties: - roles: - - role: roles/editor - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}-1@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-2@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-3@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-4@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-5@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-6@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-7@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-8@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-9@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-10@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-11@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-12@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-13@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-14@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-15@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-16@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - 
serviceAccount:${TEST_SERVICE_ACCOUNT}-17@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-18@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-19@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-20@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-21@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-22@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-23@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-24@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-25@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-26@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-27@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-28@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-29@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-30@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-31@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-32@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-33@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-34@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-35@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - name: iam-member-test-folder-${RAND} - type: iam_member.py - properties: - 
folderId: $(ref.test-folder-${RAND}.name) - roles: - - role: roles/viewer - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}-1@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-2@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-3@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-4@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-5@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-6@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-7@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-8@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-9@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-10@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-11@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-12@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-13@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-14@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-15@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-16@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-17@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-18@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-19@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - 
serviceAccount:${TEST_SERVICE_ACCOUNT}-20@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-21@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-22@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-23@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-24@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-25@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-26@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-27@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-28@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-29@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-30@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-31@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-32@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-33@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-34@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - serviceAccount:${TEST_SERVICE_ACCOUNT}-35@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - - name: ${TEST_SERVICE_ACCOUNT}-1-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-1 - displayName: ${TEST_SERVICE_ACCOUNT}-1 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-2-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-2 
- displayName: ${TEST_SERVICE_ACCOUNT}-2 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-3-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-3 - displayName: ${TEST_SERVICE_ACCOUNT}-3 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-4-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-4 - displayName: ${TEST_SERVICE_ACCOUNT}-4 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-5-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-5 - displayName: ${TEST_SERVICE_ACCOUNT}-5 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-6-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-6 - displayName: ${TEST_SERVICE_ACCOUNT}-6 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-7-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-7 - displayName: ${TEST_SERVICE_ACCOUNT}-7 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-8-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-8 - displayName: ${TEST_SERVICE_ACCOUNT}-8 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-9-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-9 - displayName: ${TEST_SERVICE_ACCOUNT}-9 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-10-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-10 - displayName: ${TEST_SERVICE_ACCOUNT}-10 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: 
${TEST_SERVICE_ACCOUNT}-11-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-11 - displayName: ${TEST_SERVICE_ACCOUNT}-11 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-12-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-12 - displayName: ${TEST_SERVICE_ACCOUNT}-12 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-13-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-13 - displayName: ${TEST_SERVICE_ACCOUNT}-13 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-14-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-14 - displayName: ${TEST_SERVICE_ACCOUNT}-14 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-15-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-15 - displayName: ${TEST_SERVICE_ACCOUNT}-15 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-16-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-16 - displayName: ${TEST_SERVICE_ACCOUNT}-16 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-17-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-17 - displayName: ${TEST_SERVICE_ACCOUNT}-17 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-18-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-18 - displayName: ${TEST_SERVICE_ACCOUNT}-18 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-19-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - 
properties: - accountId: ${TEST_SERVICE_ACCOUNT}-19 - displayName: ${TEST_SERVICE_ACCOUNT}-19 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-20-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-20 - displayName: ${TEST_SERVICE_ACCOUNT}-20 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-21-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-21 - displayName: ${TEST_SERVICE_ACCOUNT}-21 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-22-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-22 - displayName: ${TEST_SERVICE_ACCOUNT}-22 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-23-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-23 - displayName: ${TEST_SERVICE_ACCOUNT}-23 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-24-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-24 - displayName: ${TEST_SERVICE_ACCOUNT}-24 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-25-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-25 - displayName: ${TEST_SERVICE_ACCOUNT}-25 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-26-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-26 - displayName: ${TEST_SERVICE_ACCOUNT}-26 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-27-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-27 - displayName: ${TEST_SERVICE_ACCOUNT}-27 - 
projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-28-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-28 - displayName: ${TEST_SERVICE_ACCOUNT}-28 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-29-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-29 - displayName: ${TEST_SERVICE_ACCOUNT}-29 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-30-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-30 - displayName: ${TEST_SERVICE_ACCOUNT}-310 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-31-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-31 - displayName: ${TEST_SERVICE_ACCOUNT}-31 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-32-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-32 - displayName: ${TEST_SERVICE_ACCOUNT}-32 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-33-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-33 - displayName: ${TEST_SERVICE_ACCOUNT}-33 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-34-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-34 - displayName: ${TEST_SERVICE_ACCOUNT}-34 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - name: ${TEST_SERVICE_ACCOUNT}-35-${CLOUD_FOUNDATION_PROJECT_ID} - type: iam.v1.serviceAccount - properties: - accountId: ${TEST_SERVICE_ACCOUNT}-35 - displayName: ${TEST_SERVICE_ACCOUNT}-35 - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - - - name: test-folder-${RAND} - type: 
gcp-types/cloudresourcemanager-v2:folders - properties: - name: org-folder-${RAND} - parent: organizations/${CLOUD_FOUNDATION_ORGANIZATION_ID} - displayName: org-folder-${RAND} diff --git a/dm/templates/iam_member/tests/integration/iam_member.bats b/dm/templates/iam_member/tests/integration/iam_member.bats deleted file mode 100755 index 6b3cea6b3ef..00000000000 --- a/dm/templates/iam_member/tests/integration/iam_member.bats +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -export TEST_SERVICE_ACCOUNT="test-sa-${RAND}" - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/iam_member/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - gcloud iam service-accounts create "${TEST_SERVICE_ACCOUNT}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. 
- if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud iam service-accounts delete "${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" \ - --quiet --project "${CLOUD_FOUNDATION_PROJECT_ID}" - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that roles were assigned to project in deployment ${DEPLOYMENT_NAME}" { - run gcloud projects get-iam-policy "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --flatten="bindings[].members" \ - --format='table(bindings.role)' \ - --filter="bindings.members:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" - echo "status = ${status}" - echo "output = ${output}" - [[ "$output" =~ "roles/editor" ]] - [[ "$output" =~ "roles/viewer" ]] -} - -@test "Verifying that roles were assigned to folder in deployment ${DEPLOYMENT_NAME}" { - # Get the test folder ID and make it available. 
- TEST_ORG_FOLDER_NAME=$(gcloud alpha resource-manager folders list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --organization "${CLOUD_FOUNDATION_ORGANIZATION_ID}" | \ - grep "org-folder-${RAND}" | awk '{print $3}') - run gcloud alpha resource-manager folders get-iam-policy "folders/${TEST_ORG_FOLDER_NAME}" \ - --flatten="bindings[].members" \ - --format='table(bindings.role)' \ - --filter="bindings.members:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" - echo "status = ${status}" - echo "output = ${output}" - - [[ "$output" =~ "roles/editor" ]] - [[ "$output" =~ "roles/viewer" ]] -} - -@test "Verify if SA has roles on the bucket" { - role=$(gsutil iam get "gs://org-bucket-${RAND}/" | grep role) - echo "status = ${status}" - echo "output = ${output}" - [[ "$status" -eq 0 ]] - [[ "$role" =~ "roles/storage.objectAdmin" ]] - [[ "$role" =~ "roles/storage.objectViewer" ]] - -} - -@test "Verifying that roles were assigned to CloudFunction in deployment ${DEPLOYMENT_NAME}" { - run gcloud functions get-iam-policy "test-function-${RAND}" \ - --region=europe-west2 \ - --project=${CLOUD_FOUNDATION_PROJECT_ID} \ - --flatten="bindings[].members" \ - --format='table(bindings.role)' \ - --filter="bindings.members:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" - echo "status = ${status}" - echo "output = ${output}" - [[ "$output" =~ "roles/cloudfunctions.developer" ]] - [[ "$output" =~ "roles/cloudfunctions.invoker" ]] -} - - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - run gcloud projects get-iam-policy "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --flatten="bindings[].members" \ - --format='table(bindings.role)' \ - --filter="bindings.members:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" - echo "status = ${status}" - echo "output = ${output}" - [[ ! 
"$output" =~ "roles/editor" ]] - [[ ! "$output" =~ "roles/viewer" ]] -} diff --git a/dm/templates/iam_member/tests/integration/iam_member.yaml b/dm/templates/iam_member/tests/integration/iam_member.yaml deleted file mode 100644 index 643b14c3ad4..00000000000 --- a/dm/templates/iam_member/tests/integration/iam_member.yaml +++ /dev/null @@ -1,80 +0,0 @@ -# Test of the IAM member template . -# -# Variables: -# RAND: A random string used by the testing suite -# TEST_SERVICE_ACCOUNT: Service account name -# CLOUD_FOUNDATION_PROJECT_ID: Project ID - -imports: - - path: templates/iam_member/iam_member.py - name: iam_member.py - -resources: - - name: iam-member-test-project-${RAND} - type: iam_member.py - properties: - roles: - - role: roles/editor - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - role: roles/viewer - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - name: iam-member-test-folder-${RAND} - type: iam_member.py - properties: - folderId: $(ref.test-folder-${RAND}.name) - roles: - - role: roles/editor - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - role: roles/viewer - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - name: iam-member-test-bucket-${RAND} - type: iam_member.py - properties: - bucket: $(ref.test-bucket-${RAND}.name) - roles: - - role: roles/storage.objectAdmin - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - role: roles/storage.objectViewer - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - name: iam-member-test-function-${RAND} - type: iam_member.py - properties: - cloudFunction: $(ref.test-function-${RAND}.name) - roles: - - role: roles/cloudfunctions.developer - members: - - 
serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - role: roles/cloudfunctions.invoker - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - - - name: test-folder-${RAND} - type: gcp-types/cloudresourcemanager-v2:folders - properties: - name: org-folder-${RAND} - parent: organizations/${CLOUD_FOUNDATION_ORGANIZATION_ID} - displayName: org-folder-${RAND} - - - name: test-bucket-${RAND} - type: gcp-types/storage-v1:buckets - properties: - name: org-bucket-${RAND} - project: ${CLOUD_FOUNDATION_PROJECT_ID} - - - type: gcp-types/cloudfunctions-v1:projects.locations.functions - name: test-function-${RAND} - properties: - parent: projects/${CLOUD_FOUNDATION_PROJECT_ID}/locations/europe-west2 - function: test-function-${RAND} - sourceArchiveUrl: gs://cloud-function-sample/function.zip - entryPoint: handler - runtime: nodejs8 - httpsTrigger: {} - diff --git a/dm/templates/instance/README.md b/dm/templates/instance/README.md deleted file mode 100644 index a2d7ff8c0cb..00000000000 --- a/dm/templates/instance/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# Instance - -This template creates a Compute Instance. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.admin](https://cloud.google.com/compute/docs/access/iam) IAM role to the [Deployment Manager service account](https://cloud.google.com/deployment-manager/docs/access-control#access_control_for_deployment_manager) - -## Deployment - -### Resources - -- [compute.v1.instance](https://cloud.google.com/compute/docs/reference/rest/v1/instances) - -### Properties - -See the `properties` section in the schema file(s): - -- [Instance](instance.py.schema) - -### Usage - -1. 
Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/instance.yaml](examples/instance.yaml): - -```shell - cp templates/instance/examples/instance.yaml my_instance.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_instance.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_instance.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Compute Instance](examples/instance.yaml) -- [Compute Instance with private IP](examples/instance_private.yaml) diff --git a/dm/templates/instance/examples/instance.yaml b/dm/templates/instance/examples/instance.yaml deleted file mode 100644 index 12ce0881245..00000000000 --- a/dm/templates/instance/examples/instance.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Example on the Instance template usage. -# -# In this example, a Ubuntu 18.04 Compute Instance with Nginx is created. 
- -imports: - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: test-instance - type: instance.py - properties: - zone: us-central1-a - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - machineType: f1-micro - diskType: pd-ssd - networks: - - network: default - accessConfigs: - - type: ONE_TO_ONE_NAT - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx - tags: - items: - - your-tag diff --git a/dm/templates/instance/examples/instance_private.yaml b/dm/templates/instance/examples/instance_private.yaml deleted file mode 100644 index 5645b9c3d13..00000000000 --- a/dm/templates/instance/examples/instance_private.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Example on the Instance template usage. -# -# In this example, a Ubuntu 18.04 Compute Instance with Nginx is created. - -imports: - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: test-instance - type: instance.py - properties: - zone: us-central1-a - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - machineType: f1-micro - diskType: pd-ssd - networks: - - network: default - networkIP: 10.0.2.1 - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx - tags: - items: - - your-tag diff --git a/dm/templates/instance/examples/instance_source_template.yaml b/dm/templates/instance/examples/instance_source_template.yaml deleted file mode 100644 index 7cbbd978c17..00000000000 --- a/dm/templates/instance/examples/instance_source_template.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Example of the Instance with sourceInstanceTemplate property provided. -# -# Variables: -# RAND: A random string used by the testing suite. 
- -imports: - - path: templates/instance_template/instance_template.py - name: instance_template.py - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: test-instance-template-${RAND} - type: instance_template.py - properties: - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - canIpForward: true - network: test-network-${RAND} - subnetwork: regions/us-central1/subnetworks/test-subnet-${RAND} - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx\ - - name: test-instance-${RAND} - type: instance.py - properties: - zone: us-central1-a - sourceInstanceTemplate: $(ref.test-instance-template-${RAND}.selfLink) diff --git a/dm/templates/instance/instance.py b/dm/templates/instance/instance.py deleted file mode 100644 index 93a767e7eb6..00000000000 --- a/dm/templates/instance/instance.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Compute Instance.""" - -def set_optional_property(receiver, source, property_name): - """ If set, copies the given property value from one object to another. """ - - if property_name in source: - receiver[property_name] = source[property_name] - -def create_boot_disk(properties, zone, instance_name): - """ Create a boot disk configuration. 
""" - - disk_name = instance_name - boot_disk = { - 'deviceName': disk_name, - 'type': 'PERSISTENT', - 'boot': True, - 'autoDelete': True, - 'initializeParams': { - 'sourceImage': properties['diskImage'] - } - } - - disk_params = boot_disk['initializeParams'] - set_optional_property(disk_params, properties, 'diskSizeGb') - - disk_type = properties.get('diskType') - if disk_type: - disk_params['diskType'] = 'zones/{}/diskTypes/{}'.format(zone, - disk_type) - - return boot_disk - -def get_network_interfaces(properties): - """ Get the configuration that connects the instance to an existing network - and assigns to it an ephemeral public IP if specified. - """ - network_interfaces = [] - - networks = properties.get('networks', []) - if len(networks) == 0 and properties.get('network'): - network = { - "network": properties.get('network'), - "subnetwork": properties.get('subnetwork'), - "networkIP": properties.get('networkIP'), - } - networks.append(network) - if (properties.get('hasExternalIp')): - network['accessConfigs'] = [{ - "type": "ONE_TO_ONE_NAT", - }] - if properties.get('natIP'): - network['accessConfigs'][0]['natIP'] = properties.get('natIP') - - for network in networks: - if not '.' in network['network'] and not '/' in network['network']: - network_name = 'global/networks/{}'.format(network['network']) - else: - network_name = network['network'] - - network_interface = { - 'network': network_name, - } - - netif_optional_props = ['subnetwork', 'networkIP', 'aliasIpRanges', 'accessConfigs'] - for prop in netif_optional_props: - if network.get(prop): - network_interface[prop] = network[prop] - network_interfaces.append(network_interface) - - return network_interfaces - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - zone = properties['zone'] - vm_name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - network_interfaces = get_network_interfaces(properties) - - instance = { - 'name': context.env['name'], - # https://cloud.google.com/compute/docs/reference/rest/v1/instances - 'type': 'gcp-types/compute-v1:instances', - 'properties': { - 'name': vm_name, - 'zone': zone, - 'project': project_id - } - } - - if not properties.get('sourceInstanceTemplate'): - instance['properties']['machineType'] = 'zones/{}/machineTypes/{}'.format(zone, properties['machineType']) - instance['properties']['networkInterfaces'] = network_interfaces - optional_properties = [ - 'description', - 'scheduling', - 'disks', - 'minCpuPlatform', - 'guestAccelerators', - 'deletionProtection', - 'hostname', - 'shieldedInstanceConfig', - 'shieldedInstanceIntegrityPolicy', - 'labels', - 'metadata', - 'serviceAccounts', - 'canIpForward', - 'tags', - ] - for name in optional_properties: - set_optional_property(instance['properties'], properties, name) - - if not properties.get('disks'): - instance['properties']['disks'] = [create_boot_disk(properties, zone, vm_name)] - else: - instance['properties']['sourceInstanceTemplate'] = properties['sourceInstanceTemplate'] - - outputs = [ - { - 'name': 'networkInterfaces', - 'value': '$(ref.{}.networkInterfaces)'.format(context.env['name']) - }, - { - 'name': 'name', - 'value': '$(ref.{}.name)'.format(context.env['name']) - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - } - ] - - if not properties.get('sourceInstanceTemplate') and len(network_interfaces) == 1: - outputs.append({ - 'name': 'internalIp', - 'value': '$(ref.{}.networkInterfaces[0].networkIP)'.format(context.env['name']) - }) - - if 'accessConfigs' in network_interfaces[0]: - accessConfigs = network_interfaces[0]['accessConfigs'] - for i, row in enumerate(accessConfigs, 
0): - if row['type'] == 'ONE_TO_ONE_NAT': - outputs.append({ - 'name': 'externalIp', - 'value': '$(ref.{}.networkInterfaces[0].accessConfigs[{}].natIP)'.format(context.env['name'], i) - }) - break - - print(instance) - - - return {'resources': [instance], 'outputs': outputs} diff --git a/dm/templates/instance/instance.py.schema b/dm/templates/instance/instance.py.schema deleted file mode 100644 index da1322150a9..00000000000 --- a/dm/templates/instance/instance.py.schema +++ /dev/null @@ -1,750 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Compute Instance - author: Sourced Group Inc. - version: 1.0.0 - description: | - Deploys a Compute Instance connected to a custom (or default) network. 
- - For more information on this resource: - https://cloud.google.com/compute/ - - APIs endpoints used by this template: - - gcp-types/compute-v1:instances => - https://cloud.google.com/compute/docs/reference/rest/v1/instances - -required: - - zone - -oneOf: - - allOf: - - required: - - machineType - - diskImage - oneOf: - - allOf: - - required: - - networks - - properties: - networks: - minItems: 1 - - not: - required: - - network - - not: - required: - - natIP - - not: - required: - - subnetwork - - not: - required: - - networkIP - - allOf: - - required: - - network - - not: - required: - - networks - - required: - - sourceInstanceTemplate - -additionalProperties: false - -definitions: - hasExternalIp: - type: boolean - default: true - description: | - Defines wether the instance will use an external IP from a shared - ephemeral IP address pool. If this is set to false, the instance - will not have an external IP. - natIP: - type: string - description: | - An external IP address associated with this instance. Specify an unused - static external IP address available to the project or leave this field - undefined to use an IP from a shared ephemeral IP address pool. If you - specify a static external IP address, it must live in the same region - as the zone of the instance. - If hasExternalIp is false this field is ignored. - network: - type: string - description: | - URL of the network resource for this instance. When creating an instance, if neither the network - nor the subnetwork is specified, the default network global/networks/default is used; - if the network is not specified but the subnetwork is specified, the network is inferred. - - If you specify this property, you can specify the network as a full or partial URL. 
- For example, the following are all valid URLs: - - - https://www.googleapis.com/compute/v1/projects/project/global/networks/network - - projects/project/global/networks/network - - global/networks/default - Authorization requires one or more of the following Google IAM permissions on the specified resource network: - - - compute.networks.use - - compute.networks.useExternalIp - subnetwork: - type: string - description: | - The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, - do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. - If the network is in custom subnet mode, specifying the subnetwork is required. - If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - - - https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - - regions/region/subnetworks/subnetwork - Authorization requires one or more of the following Google IAM permissions on the specified resource subnetwork: - - - compute.subnetworks.use - - compute.subnetworks.useExternalIp - networkIP: - type: string - description: | - An IPv4 internal IP address to assign to the instance for this network interface. - If not specified by the user, an unused internal IP is assigned by the system. - -properties: - name: - type: string - description: The name of the Instance resource. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the instance. - description: - type: string - description: | - An optional description of this resource. Provide this property when you create the resource. 
- network: - $ref: '#/definitions/network' - subnetwork: - $ref: '#/definitions/subnetwork' - networkIP: - $ref: '#/definitions/networkIP' - hasExternalIp: - $ref: '#/definitions/hasExternalIp' - natIP: - $ref: '#/definitions/natIP' - networks: - type: array - description: | - Networks the instance will be connected to; - e.g., 'my-custom-network' or 'default'. - items: - type: object - additionalProperties: false - required: - - network - properties: - network: - $ref: '#/definitions/network' - subnetwork: - $ref: '#/definitions/subnetwork' - networkIP: - $ref: '#/definitions/networkIP' - aliasIpRanges: - type: array - uniqueItems: true - description: | - An array of alias IP ranges for this network interface. You can only specify this - field for network interfaces in VPC networks. - items: - type: object - additionalProperties: false - properties: - ipCidrRange: - type: string - description: | - The IP alias ranges to allocate for this interface. This IP CIDR range must belong - to the specified subnetwork and cannot contain IP addresses reserved by system or - used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), - a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). - subnetworkRangeName: - type: string - description: | - The name of a subnetwork secondary IP range from which to allocate an IP alias range. - If not specified, the primary range of the subnetwork is used. - accessConfigs: - type: array - uniqueItems: true - description: | - An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, - is supported. If there are no accessConfigs specified, then this instance will have no external internet access. - items: - type: object - additionalProperties: false - properties: - type: - type: string - description: | - The type of configuration. The default and only option is ONE_TO_ONE_NAT. 
- enum: - - ONE_TO_ONE_NAT - name: - type: string - description: | - The name of this access configuration. The default and recommended name is External NAT, - but you can use any arbitrary string, such as My external IP or Network Access. - setPublicPtr: - type: boolean - description: | - Specifies whether a public DNS 'PTR' record should be created to map the external - IP address of the instance to a DNS domain name. - publicPtrDomainName: - type: string - description: | - The DNS domain name for the public PTR record. You can set this field only - if the setPublicPtr field is enabled. - networkTier: - type: string - description: | - This signifies the networking tier used for configuring this access configuration - and can only take the following values: PREMIUM, STANDARD. - - If an AccessConfig is specified without a valid external IP address, an - ephemeral IP will be created with this networkTier. - - If an AccessConfig with a valid external IP address is specified, it must match - that of the networkTier associated with the Address resource owning that IP. - enum: - - STANDARD - - PREMIUM - natIP: - $ref: '#/definitions/natIP' - zone: - type: string - description: Availability zone. E.g. 'us-central1-a' - tags: - type: object - additionalProperties: false - description: | - Tags to apply to this instance. Tags are used to identify valid sources - or targets for network firewalls and are specified by the client during - instance creation. The tags can be later modified by the setTags - method. Each tag within the list must comply with RFC1035. Multiple tags - can be specified via the 'tags.items' field. - properties: - items: - type: array - uniqueItems: true - description: | - An array of tags. Each tag must be 1-63 characters long, and comply - with RFC1035. - items: - type: string - machineType: - type: string - description: | - The Compute Instance type; e.g., 'n1-standard-1'. - See https://cloud.google.com/compute/docs/machine-types for details. 
- disks: - type: array - uniqueItems: true - description: | - Array of disks associated with this instance. Persistent disks must be created before you can assign them. - items: - type: object - additionalProperties: false - oneOf: - - required: - - source - - required: - - initializeParams - - allOf: - - not: - required: - - source - - not: - required: - - initializeParams - properties: - type: - type: string - description: | - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. - enum: - - SCRATCH - - PERSISTENT - mode: - type: string - description: | - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. - If not specified, the default is to attach the disk in READ_WRITE mode. - enum: - - READ_WRITE - - READ_ONLY - source: - type: string - description: | - Specifies a valid partial or full URL to an existing Persistent Disk resource. - When creating a new instance, one of initializeParams.sourceImage or - disks.source is required except for local SSD. - - If desired, you can also attach existing non-root persistent disks using this property. - This field is only applicable for persistent disks. - - Note that for InstanceTemplate, specify the disk name, not the URL for the disk. - - Authorization requires one or more of the following Google IAM permissions on the specified resource source: - - compute.disks.use - compute.disks.useReadOnly - deviceName: - type: string - description: | - Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* - tree of a Linux operating system running within the instance. This name can be used to reference - the device for mounting, resizing, and so on, from within the instance. - - If not specified, the server chooses a default device name to apply to this disk, in the - form persistent-disk-x, where x is a number assigned by Google Compute Engine. - This field is only applicable for persistent disks. 
- boot: - type: boolean - description: | - Indicates that this is a boot disk. The virtual machine will use the first partition - of the disk for its root filesystem. - initializeParams: - type: object - additionalProperties: false - description: | - Specifies the parameters for a new disk that will be created alongside the new instance. - Use initialization parameters to create boot disks or local SSDs attached to the new instance. - - This property is mutually exclusive with the source property; you can only define one or the other, but not both. - properties: - labels: - type: object - description: | - Labels to apply to this disk. These can be later modified by the disks.setLabels method. - This field is only applicable for persistent disks. - - An object containing a list of "key": value pairs. - Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - - Authorization requires the following Google IAM permission on the specified resource labels: - - compute.disks.setLabels - diskName: - type: string - description: | - Specifies the disk name. If not specified, the default is to use the name of the instance. - If the disk with the instance name exists already in the given zone/region, - a new name will be automatically generated. - sourceImage: - type: string - description: | - The source image to create this disk. When creating a new instance, one of - initializeParams.sourceImage or disks.source is required except for local SSD. - - To create a disk with one of the public operating system images, specify the image by its family name. 
- For example, specify family/debian-9 to use the latest Debian 9 image: - - projects/debian-cloud/global/images/family/debian-9 - - Alternatively, use a specific version of a public operating system image: - - projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD - - To create a disk with a custom image that you created, specify the image name in the following format: - - global/images/my-custom-image - - You can also specify a custom image by its image family, which returns the latest version of the - image in that family. Replace the image name with family/family-name: - - global/images/family/my-image-family - - If the source image is deleted later, this field will not be set. - - Authorization requires the following Google IAM permission on the specified resource sourceImage: - - compute.images.useReadOnly - description: - type: string - description: | - An optional description. Provide this property when creating the disk. - diskSizeGb: - type: number - description: | - Specifies the size of the disk in base-2 GB. - diskType: - type: string - description: | - Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, - specified using the full URL. For example: - - https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard - - Other values include pd-ssd and local-ssd. If you define this field, you can provide either the full - or partial URL. For example, the following are valid values: - - https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType - Note that for InstanceTemplate, this is the name of the disk type, not URL. - sourceImageEncryptionKey: - type: object - additionalProperties: false - description: | - The customer-supplied encryption key of the source image. Required if the source image is - protected by a customer-supplied encryption key. 
- - Instance templates do not store customer-supplied encryption keys, so you cannot create disks - for instances in a managed instance group if the source images are encrypted with your own keys. - properties: - rawKey: - type: string - description: | - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 - to either encrypt or decrypt this resource. - kmsKeyName: - type: string - description: | - The name of the encryption key that is stored in Google Cloud KMS. - sourceSnapshot: - type: string - description: | - The source snapshot to create this disk. When creating a new instance, one of - initializeParams.sourceSnapshot or disks.source is required except for local SSD. - - To create a disk with a snapshot that you created, specify the snapshot name in the following format: - - global/snapshots/my-backup - - If the source snapshot is deleted later, this field will not be set. - - Authorization requires the following Google IAM permission on the specified resource sourceSnapshot: - - compute.snapshots.useReadOnly - sourceSnapshotEncryptionKey: - type: object - additionalProperties: false - description: | - The customer-supplied encryption key of the source snapshot. - properties: - rawKey: - type: string - description: | - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 - to either encrypt or decrypt this resource. - kmsKeyName: - type: string - description: | - The name of the encryption key that is stored in Google Cloud KMS. - autoDelete: - type: boolean - description: | - Specifies whether the disk will be auto-deleted when the instance is deleted - (but not when the disk is detached from the instance). - interface: - type: string - description: | - Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. - The default is SCSI. Persistent disks must always use SCSI and the request will fail if you - attempt to attach a persistent disk in any other format than SCSI. 
Local SSDs can use either NVME or SCSI. - For performance characteristics of SCSI over NVMe, see Local SSD performance. - enum: - - SCSI - - NVME - guestOsFeatures: - type: array - uniqueItems: true - description: | - A list of features to enable on the guest operating system. Applicable only for bootable images. - Read Enabling guest operating system features to see a list of available options. - items: - type: object - additionalProperties: false - properties: - type: - type: string - description: | - https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features - The ID of a supported feature. Read Enabling guest operating system features - to see a list of available options. - enum: - - MULTI_IP_SUBNET - - SECURE_BOOT - - UEFI_COMPATIBLE - - VIRTIO_SCSI_MULTIQUEUE - - WINDOWS - diskEncryptionKey: - type: object - additionalProperties: false - description: | - The customer-supplied encryption key of the source snapshot. - properties: - rawKey: - type: string - description: | - Encrypts or decrypts a disk using a customer-supplied encryption key. - - If you are creating a new disk, this field encrypts the new disk using an encryption - key that you provide. If you are attaching an existing disk that is already encrypted, - this field decrypts the disk using the customer-supplied encryption key. - - If you encrypt a disk using a customer-supplied key, you must provide the same key again when - you attempt to use this resource at a later time. For example, you must provide the key when - you create a snapshot or an image from the disk or when you attach the disk - to a virtual machine instance. - - If you do not provide an encryption key, then the disk will be encrypted using an automatically - generated key and you do not need to provide a key to use the disk later. - - Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys - to encrypt disks in a managed instance group. 
- kmsKeyName: - type: string - description: | - The name of the encryption key that is stored in Google Cloud KMS. - canIpForward: - type: boolean - default: False - description: | - If "True". allows the instance to send and receive packets with non-matching destination - and source IPs. - diskType: - type: string - description: The boot disk type. - enum: - - pd-ssd - - pd-standard - - local-ssd - diskImage: - type: string - default: None - description: | - The source image for the disk. To create the disk with one of the - public operating system images, specify the image by its family name. - For example, use "projects/debian-cloud/global/images/family/debian-9" - to install the latest Debian 9 image. - To create a disk with a custom image (that you created), specify the image - name in the following format: global/images/my-custom-image. - See https://cloud.google.com/compute/docs/images for details. - diskSizeGb: - type: integer - minimum: 10 - scheduling: - type: object - additionalProperties: false - description: | - Sets the scheduling options for this instance. - properties: - onHostMaintenance: - type: string - description: | - Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. - For preemptible instances, the default and only possible behavior is TERMINATE. - For more information, see Setting Instance Scheduling Options. - enum: - - MIGRATE - - TERMINATE - automaticRestart: - type: boolean - description: | - Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine - (not terminated by a user). You can only set the automatic restart option for standard instances. - Preemptible instances cannot be automatically restarted. - - By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. - preemptible: - type: boolean - description: | - Defines whether the instance is preemptible. 
This can only be set during instance creation, - it cannot be set or changed after the instance has been created. - nodeAffinities: - type: array - uniqueItems: true - description: | - A set of node affinity and anti-affinity. - items: - type: object - additionalProperties: false - properties: - key: - type: string - description: | - Corresponds to the label key of Node resource. - operator: - type: string - description: | - Defines the operation of node selection. - values: - type: array - uniqueItems: true - description: | - Corresponds to the label values of Node resource. - items: - type: string - deletionProtection: - type: boolean - description: | - Whether the resource should be protected against deletion. - - Authorization requires the following Google IAM permission on the specified resource deletionProtection: - - compute.instances.setDeletionProtection - hostname: - type: string - labels: - type: object - description: | - Labels to apply to this instance. These can be later modified by the setLabels method. - - An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - - Authorization requires the following Google IAM permission on the specified resource labels: - - compute.instances.setLabels - minCpuPlatform: - type: string - description: | - Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms, - such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". - enum: - - Intel Sandy Bridge - - Intel Ivy Bridge - - Intel Haswell - - Intel Broadwell - - Intel Skylake - shieldedInstanceConfig: - type: object - additionalProperties: false - properties: - enableSecureBoot: - type: boolean - description: | - Defines whether the instance has Secure Boot enabled. - enableVtpm: - type: boolean - description: | - Defines whether the instance has the vTPM enabled. 
- enableIntegrityMonitoring: - type: boolean - description: | - Defines whether the instance has integrity monitoring enabled. - shieldedInstanceIntegrityPolicy: - type: object - additionalProperties: false - properties: - updateAutoLearnPolicy: - type: boolean - description: | - Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. - metadata: - type: object - additionalProperties: false - required: - - items - description: | - The instance metadata. For example: - metadata: - items: - - key: startup-script - - value: sudo apt-get update - properties: - items: - type: array - uniqueItems: true - description: | - A collection of metadata key-value pairs. - items: - type: object - additionalProperties: false - properties: - key: - type: string - value: - type: [string, number, boolean] - serviceAccounts: - type: array - uniqueItems: true - description: | - A list of service accounts, with their specified scopes, authorized for - this instance. Only one service account per VM instance is supported. - items: - type: object - additionalProperties: false - properties: - email: - type: string - description: | - Email address of the service account - scopes: - type: array - description: | - The list of scopes to be made available for this service account - items: - type: string - description: | - Access scope, e.g. 'https://www.googleapis.com/auth/compute.readonly' - Visit https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam - for more details - guestAccelerators: - type: array - uniqueItems: true - description: | - A list of the type and count of accelerator cards attached to the instance. - items: - type: object - additionalProperties: false - properties: - acceleratorType: - type: string - description: | - Full or partial URL of the accelerator type resource to attach to this instance. 
For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 - If you are creating an instance template, specify only the accelerator name. - See GPUs on Compute Engine for a full list of accelerator types. - acceleratorCount: - type: integer - description: | - The number of the guest accelerator cards exposed to this instance. - sourceInstanceTemplate: - type: string - description: | - Specifies instance template to create the instance. - This field is optional. It can be a full or partial URL. - See https://cloud.google.com/compute/docs/reference/rest/v1/instances/insert for details. - -outputs: - networkInterfaces: - type: array - description: | - A list of network interfaces of the new instance. - items: - type: object - properties: - externalIp: - type: string - description: Reference to the external ip address of the new instance - internalIp: - type: string - description: Reference to tbe internal ip address of the new instance - name: - type: string - description: A name of the instance resource - selfLink: - type: string - description: The URI (SelfLink) of the instance resource. - -documentation: - - templates/instance/README.md - -examples: - - templates/instance/examples/instance.yaml diff --git a/dm/templates/instance/tests/integration/instance.bats b/dm/templates/instance/tests/integration/instance.bats deleted file mode 100755 index 157e495b7f1..00000000000 --- a/dm/templates/instance/tests/integration/instance.bats +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. 
-# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/instance/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - gcloud compute networks create "test-network-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --description "integration test ${RAND}" \ - --subnet-mode custom - gcloud compute networks subnets create "test-subnet-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network "test-network-${RAND}" \ - --range 10.0.1.0/24 \ - --region us-central1 - fi - - # Per-test setup steps. -} - -function teardown() { - #Global teardown; this is executed once per test file - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud compute networks subnets delete "test-subnet-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-central1 -q - gcloud compute networks delete "test-network-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that a Compute Instance was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "test-instance-${RAND}" ]] -} - -@test "Verifying that the Compute Instance was connected to a custom network in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances describe test-instance-${RAND} --zone "us-central1-a" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "test-network-${RAND}" ]] -} - -@test "Verifying that the Compute Instance has the canIpForward property set in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances describe test-instance-${RAND} --zone "us-central1-a" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "canIpForward: true" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - run gcloud compute instances list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-instance-${RAND}" ]] -} diff --git a/dm/templates/instance/tests/integration/instance.yaml b/dm/templates/instance/tests/integration/instance.yaml deleted file mode 100644 index 45d52409bae..00000000000 --- a/dm/templates/instance/tests/integration/instance.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Test of the Instance template. -# -# Variables: -# RAND: A random string used by the testing suite. 
- -imports: - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: test-instance-${RAND} - type: instance.py - properties: - zone: us-central1-a - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - canIpForward: true - network: test-network-${RAND} - subnetwork: regions/us-central1/subnetworks/test-subnet-${RAND} - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx \ No newline at end of file diff --git a/dm/templates/instance/tests/integration/instance_1_nic.bats b/dm/templates/instance/tests/integration/instance_1_nic.bats deleted file mode 100755 index 379d0633dbc..00000000000 --- a/dm/templates/instance/tests/integration/instance_1_nic.bats +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/instance/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - #Global teardown; this is executed once per test file - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that a Compute Instance was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "test-instance-${RAND}" ]] -} - -@test "Verifying that the Compute Instance was connected to the first custom network in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances describe test-instance-${RAND} --zone "us-central1-a" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "test-network-0-${RAND}" ]] -} - -@test "Verifying that the Compute Instance has the canIpForward property set in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances describe test-instance-${RAND} --zone "us-central1-a" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "canIpForward: true" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - run gcloud compute instances list --project 
"${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-instance-${RAND}" ]] -} diff --git a/dm/templates/instance/tests/integration/instance_1_nic.yaml b/dm/templates/instance/tests/integration/instance_1_nic.yaml deleted file mode 100644 index 79f64593a03..00000000000 --- a/dm/templates/instance/tests/integration/instance_1_nic.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# Test of the Instance template. -# -# Variables: -# RAND: A random string used by the testing suite. - -imports: - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: test-instance-${RAND} - type: instance.py - properties: - zone: us-central1-a - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - canIpForward: true - networks: - - network: $(ref.test-network-0-${RAND}.selfLink) - subnetwork: $(ref.test-subnetwork-0-${RAND}.selfLink) - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx - - name: test-network-0-${RAND} - type: compute.v1.network - properties: - autoCreateSubnetworks: false - - name: test-subnetwork-0-${RAND} - type: compute.v1.subnetwork - properties: - network: $(ref.test-network-0-${RAND}.selfLink) - ipCidrRange: 10.0.1.0/24 - region: us-central1 diff --git a/dm/templates/instance/tests/integration/instance_2_nics.bats b/dm/templates/instance/tests/integration/instance_2_nics.bats deleted file mode 100755 index ae17e82a193..00000000000 --- a/dm/templates/instance/tests/integration/instance_2_nics.bats +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! 
-e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/instance/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - #Global teardown; this is executed once per test file - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that a Compute Instance was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "test-instance-${RAND}" ]] -} - -@test "Verifying that the Compute Instance was connected to the first custom network in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances describe test-instance-${RAND} --zone "us-central1-a" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "test-network-0-${RAND}" ]] -} - -@test "Verifying that the Compute Instance was connected to the second custom network in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances describe test-instance-${RAND} --zone "us-central1-a" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "test-network-1-${RAND}" ]] -} - -@test "Verifying that the Compute Instance has the canIpForward property set in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances describe test-instance-${RAND} --zone "us-central1-a" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "canIpForward: true" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - run gcloud compute instances list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-instance-${RAND}" ]] -} diff --git a/dm/templates/instance/tests/integration/instance_2_nics.yaml b/dm/templates/instance/tests/integration/instance_2_nics.yaml deleted file mode 100644 index 7f99a8d28a4..00000000000 --- a/dm/templates/instance/tests/integration/instance_2_nics.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# Test of the Instance template. 
-# -# Variables: -# RAND: A random string used by the testing suite. - -imports: - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: test-instance-${RAND} - type: instance.py - properties: - zone: us-central1-a - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - canIpForward: true - networks: - - network: $(ref.test-network-0-${RAND}.selfLink) - subnetwork: $(ref.test-subnetwork-0-${RAND}.selfLink) - - network: $(ref.test-network-1-${RAND}.selfLink) - subnetwork: $(ref.test-subnetwork-1-${RAND}.selfLink) - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx - - name: test-network-0-${RAND} - type: compute.v1.network - properties: - autoCreateSubnetworks: false - - name: test-network-1-${RAND} - type: compute.v1.network - properties: - autoCreateSubnetworks: false - - name: test-subnetwork-0-${RAND} - type: compute.v1.subnetwork - properties: - network: $(ref.test-network-0-${RAND}.selfLink) - ipCidrRange: 10.0.1.0/24 - region: us-central1 - - name: test-subnetwork-1-${RAND} - type: compute.v1.subnetwork - properties: - network: $(ref.test-network-1-${RAND}.selfLink) - ipCidrRange: 10.0.2.0/24 - region: us-central1 diff --git a/dm/templates/instance/tests/integration/instance_source_template.bats b/dm/templates/instance/tests/integration/instance_source_template.bats deleted file mode 100755 index 659ab12f64a..00000000000 --- a/dm/templates/instance/tests/integration/instance_source_template.bats +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! 
-e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/instance/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - gcloud compute networks create "test-network-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --description "integration test ${RAND}" \ - --subnet-mode custom - gcloud compute networks subnets create "test-subnet-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network "test-network-${RAND}" \ - --range 10.0.1.0/24 \ - --region us-central1 - fi - - # Per-test setup steps. -} - -function teardown() { - #Global teardown; this is executed once per test file - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud compute networks subnets delete "test-subnet-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-central1 -q - gcloud compute networks delete "test-network-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --verbosity debug -} - -@test "Verifying that a Compute Instance was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "test-instance-${RAND}" ]] -} - -@test "Verifying that the Compute Instance was connected to a custom network in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances describe test-instance-${RAND} --zone "us-central1-a" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "test-network-${RAND}" ]] -} - -@test "Verifying that the Compute Instance has the canIpForward property set in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances describe test-instance-${RAND} --zone "us-central1-a" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - [[ "$output" =~ "canIpForward: true" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - run gcloud compute instances list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-instance-${RAND}" ]] -} diff --git a/dm/templates/instance/tests/integration/instance_source_template.yaml b/dm/templates/instance/tests/integration/instance_source_template.yaml deleted file mode 100644 index a2afd48601a..00000000000 --- a/dm/templates/instance/tests/integration/instance_source_template.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Test of the Instance with sourceInstanceTemplate property provided. -# -# Variables: -# RAND: A random string used by the testing suite. 
- -imports: - - path: templates/instance_template/instance_template.py - name: instance_template.py - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: test-instance-template-${RAND} - type: instance_template.py - properties: - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - canIpForward: true - network: test-network-${RAND} - subnetwork: regions/us-central1/subnetworks/test-subnet-${RAND} - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx\ - - name: test-instance-${RAND} - type: instance.py - properties: - zone: us-central1-a - sourceInstanceTemplate: $(ref.test-instance-template-${RAND}.selfLink) diff --git a/dm/templates/instance/tests/integration/instance_template/instance_template.bats b/dm/templates/instance/tests/integration/instance_template/instance_template.bats deleted file mode 100755 index cc22489cb40..00000000000 --- a/dm/templates/instance/tests/integration/instance_template/instance_template.bats +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export IMAGE="projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - envsubst < "templates/instance_template/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying instance template disk properties" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.disks[0].initializeParams)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "diskType: pd-ssd" ]] - [[ "$output" =~ "sourceImage: ${IMAGE}" ]] - [[ "$output" =~ "diskSizeGb: '50'" ]] -} - -@test "Verifying instance spec properties" { - run gcloud compute instance-templates describe it-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "machineType: f1-micro" ]] - [[ "$output" =~ "description: Instance description" ]] - [[ "$output" =~ "canIpForward: true" ]] -} - -@test "Verifying instance template properties" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "value(name, description, properties.labels)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "Template description" ]] - [[ 
"$output" =~ "it-${RAND}" ]] - [[ "$output" =~ "name=wrench" ]] -} - -@test "Verifying instance template network tags" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.tags)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "ftp" ]] - [[ "$output" =~ "https" ]] -} - -@test "Verifying instance template metadata" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.metadata)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "key: createdBy" ]] - [[ "$output" =~ "value: unitTest" ]] -} - -@test "Verifying instance template network properties" { - NET="https://www.googleapis.com/compute/v1/projects/${CLOUD_FOUNDATION_PROJECT_ID}/global/networks/test-network-${RAND}" - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.networkInterfaces[0])" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "name: External NAT" ]] - [[ "$output" =~ "type: ONE_TO_ONE_NAT" ]] - [[ "$output" =~ "network: ${NET}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/instance/tests/integration/instance_template/instance_template.yaml b/dm/templates/instance/tests/integration/instance_template/instance_template.yaml deleted file mode 100644 index 847fbce2716..00000000000 --- a/dm/templates/instance/tests/integration/instance_template/instance_template.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# Test of the Instance Template template. 
-# -# Variables: -# RAND: a random string used by the testing suite -# IMAGE: a URL to the base disk image provided by the testing suite - -imports: - - path: templates/instance_template/instance_template.py - name: instance_template.py - -resources: - - name: instance-template-${RAND} - type: instance_template.py - properties: - name: it-${RAND} - instanceDescription: Instance description - templateDescription: Template description - network: $(ref.test-network-${RAND}.selfLink) - diskImage: ${IMAGE} - machineType: f1-micro - canIpForward: true - diskType: pd-ssd - diskSizeGb: 50 - tags: - items: - - ftp - - https - metadata: - items: - - key: createdBy - value: unitTest - labels: - name: wrench - - name: test-network-${RAND} - type: compute.v1.network - properties: - autoCreateSubnetworks: true diff --git a/dm/templates/instance_template/README.md b/dm/templates/instance_template/README.md deleted file mode 100644 index 753e33ae54f..00000000000 --- a/dm/templates/instance_template/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Instance Template - -This template creates an instance template. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, setup billing, enable requisite APIs](../project/README.md) -- Grant the [compute.admin](https://cloud.google.com/compute/docs/access/iam) IAM -role to the [Deployment Manager service account](https://cloud.google.com/deployment-manager/docs/access-control#access_control_for_deployment_manager) - -## Deployment - -### Resources - -- [compute.v1.instanceTemplate](https://cloud.google.com/compute/docs/reference/latest/instanceTemplates) - -### Properties - -See the `properties` section in the schema file(s): - -- [Instance Template](instance_template.py.schema) - -### Usage - -1. 
Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment, in this - case [examples/instance\_template.yaml](examples/instance_template.yaml) - -```shell - cp templates/instance_template/examples/instance_template.yaml \ - my_instance_template.yaml -``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - -```shell - vim my_instance_template.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment as described below, replacing - with your with your own deployment name - -```shell - gcloud deployment-manager deployments create \ - --config my_instance_template.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Instance Template](examples/instance_template.yaml) diff --git a/dm/templates/instance_template/examples/instance_template.yaml b/dm/templates/instance_template/examples/instance_template.yaml deleted file mode 100644 index f7103769941..00000000000 --- a/dm/templates/instance_template/examples/instance_template.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Example of the Instance Template template usage. -# -# In this example, an instance template with Nginx is created. 
- -imports: - - path: templates/instance_template/instance_template.py - name: instance_template.py - -resources: - - name: instance-template-example - type: instance_template.py - properties: - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - networks: - - network: default - accessConfigs: - - type: ONE_TO_ONE_NAT - machineType: f1-micro - tags: - items: - - http - metadata: - items: - - key: startup-script - value: apt-get update && apt-get install -y nginx diff --git a/dm/templates/instance_template/instance_template.py b/dm/templates/instance_template/instance_template.py deleted file mode 100644 index f10c0393a3e..00000000000 --- a/dm/templates/instance_template/instance_template.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates an Instance Template. """ - - -def set_optional_property(receiver, source, property_name, rename_to=None): - """ If set, copies the given property value from one object to another - and optionally rename it. - """ - - rename_to = rename_to or property_name - if property_name in source: - receiver[rename_to] = source[property_name] - - -def create_boot_disk(properties): - """ Creates the boot disk configuration. 
""" - - boot_disk = { - 'deviceName': 'boot', - 'type': 'PERSISTENT', - 'boot': True, - 'autoDelete': True, - 'initializeParams': { - 'sourceImage': properties['diskImage'] - } - } - - for prop in ['diskSizeGb', 'diskType']: - set_optional_property(boot_disk['initializeParams'], properties, prop) - - return boot_disk - - -def get_network_interfaces(properties): - """ Get the configuration that connects the instance to an existing network - and assigns to it an ephemeral public IP if specified. - """ - network_interfaces = [] - - networks = properties.get('networks', []) - if len(networks) == 0 and properties.get('network'): - network = { - "network": properties.get('network'), - "subnetwork": properties.get('subnetwork'), - "networkIP": properties.get('networkIP'), - } - networks.append(network) - if (properties.get('hasExternalIp')): - network['accessConfigs'] = [{ - "type": "ONE_TO_ONE_NAT", - }] - if properties.get('natIP'): - network['accessConfigs'][0]["natIp"] = properties.get('natIP') - - for network in networks: - if not '.' in network['network'] and not '/' in network['network']: - network_name = 'global/networks/{}'.format(network['network']) - else: - network_name = network['network'] - - network_interface = { - 'network': network_name, - } - - netif_optional_props = ['subnetwork', 'networkIP', 'aliasIpRanges', 'accessConfigs'] - for prop in netif_optional_props: - if network.get(prop): - network_interface[prop] = network[prop] - network_interfaces.append(network_interface) - - return network_interfaces - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - name = properties.get('name', context.env['name']) - machine_type = properties['machineType'] - network_interfaces = get_network_interfaces(context.properties) - project_id = properties.get('project', context.env['project']) - instance_template = { - 'name': context.env['name'], - # https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates - 'type': 'gcp-types/compute-v1:instanceTemplates', - 'properties': - { - 'name': name, - 'project': project_id, - 'properties': - { - 'machineType': machine_type, - 'networkInterfaces': network_interfaces - } - } - } - - template_spec = instance_template['properties']['properties'] - - optional_props = [ - 'metadata', - 'disks', - 'scheduling', - 'tags', - 'canIpForward', - 'labels', - 'serviceAccounts', - 'scheduling', - 'shieldedInstanceConfig', - 'minCpuPlatform', - 'guestAccelerators', - ] - - for prop in optional_props: - set_optional_property(template_spec, properties, prop) - if not template_spec.get('disks'): - template_spec['disks'] = [create_boot_disk(properties)] - - set_optional_property( - template_spec, - properties, - 'instanceDescription', - 'description' - ) - - set_optional_property( - instance_template['properties'], - properties, - 'templateDescription', - 'description' - ) - - set_optional_property( - instance_template['properties'], - properties, - 'sourceInstance' - ) - - set_optional_property( - instance_template['properties'], - properties, - 'sourceInstanceParams' - ) - - return { - 'resources': [instance_template], - 'outputs': - [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - } - ] - } diff --git a/dm/templates/instance_template/instance_template.py.schema b/dm/templates/instance_template/instance_template.py.schema deleted file mode 100644 index ed97c7bd8d1..00000000000 --- a/dm/templates/instance_template/instance_template.py.schema +++ /dev/null @@ -1,768 
+0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Instance Template - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates an instance template. - - For more information on this resource: - https://cloud.google.com/compute/ - - APIs endpoints used by this template: - - gcp-types/compute-v1:instanceTemplates => - https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates - -required: - - diskImage - -oneOf: - - allOf: - - required: - - networks - - properties: - networks: - minItems: 1 - - not: - required: - - network - - not: - required: - - natIP - - not: - required: - - subnetwork - - not: - required: - - networkIP - - allOf: - - required: - - network - - not: - required: - - networks - -additionalProperties: false - -definitions: - hasExternalIp: - type: boolean - default: true - description: | - Defines wether the instance will use an external IP from a shared - ephemeral IP address pool. If this is set to false, the instance - will not have an external IP. - natIP: - type: string - description: | - An external IP address associated with this instance. Specify an unused - static external IP address available to the project or leave this field - undefined to use an IP from a shared ephemeral IP address pool. If you - specify a static external IP address, it must live in the same region - as the zone of the instance. 
- If hasExternalIp is false this field is ignored. - network: - type: string - description: | - URL of the network resource for this instance. When creating an instance, if neither the network - nor the subnetwork is specified, the default network global/networks/default is used; - if the network is not specified but the subnetwork is specified, the network is inferred. - - If you specify this property, you can specify the network as a full or partial URL. - For example, the following are all valid URLs: - - - https://www.googleapis.com/compute/v1/projects/project/global/networks/network - - projects/project/global/networks/network - - global/networks/default - Authorization requires one or more of the following Google IAM permissions on the specified resource network: - - - compute.networks.use - - compute.networks.useExternalIp - subnetwork: - type: string - description: | - The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, - do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. - If the network is in custom subnet mode, specifying the subnetwork is required. - If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - - - https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - - regions/region/subnetworks/subnetwork - Authorization requires one or more of the following Google IAM permissions on the specified resource subnetwork: - - - compute.subnetworks.use - - compute.subnetworks.useExternalIp - networkIP: - type: string - description: | - An IPv4 internal IP address to assign to the instance for this network interface. - If not specified by the user, an unused internal IP is assigned by the system. - -properties: - name: - type: string - description: The name of the instance template resource. Resource name would be used if omitted. 
- project: - type: string - description: | - The project ID of the project containing the instance. - templateDescription: - type: string - description: | - The resource description (optional). - instanceDescription: - type: string - description: | - The description of the instance resource the instance template - will create (optional). - network: - $ref: '#/definitions/network' - subnetwork: - $ref: '#/definitions/subnetwork' - networkIP: - $ref: '#/definitions/networkIP' - hasExternalIp: - $ref: '#/definitions/hasExternalIp' - natIP: - $ref: '#/definitions/natIP' - networks: - type: array - description: | - Networks the instance will be connected to; - e.g., 'my-custom-network' or 'default'. - items: - type: object - additionalProperties: false - required: - - network - properties: - network: - $ref: '#/definitions/network' - subnetwork: - $ref: '#/definitions/subnetwork' - networkIP: - $ref: '#/definitions/networkIP' - aliasIpRanges: - type: array - uniqueItems: true - description: | - An array of alias IP ranges for this network interface. You can only specify this - field for network interfaces in VPC networks. - items: - type: object - additionalProperties: false - properties: - ipCidrRange: - type: string - description: | - The IP alias ranges to allocate for this interface. This IP CIDR range must belong - to the specified subnetwork and cannot contain IP addresses reserved by system or - used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), - a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). - subnetworkRangeName: - type: string - description: | - The name of a subnetwork secondary IP range from which to allocate an IP alias range. - If not specified, the primary range of the subnetwork is used. - accessConfigs: - type: array - uniqueItems: true - description: | - An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, - is supported. 
If there are no accessConfigs specified, then this instance will have no external internet access. - items: - type: object - additionalProperties: false - properties: - type: - type: string - description: | - The type of configuration. The default and only option is ONE_TO_ONE_NAT. - enum: - - ONE_TO_ONE_NAT - name: - type: string - description: | - The name of this access configuration. The default and recommended name is External NAT, - but you can use any arbitrary string, such as My external IP or Network Access. - setPublicPtr: - type: boolean - description: | - Specifies whether a public DNS 'PTR' record should be created to map the external - IP address of the instance to a DNS domain name. - publicPtrDomainName: - type: string - description: | - The DNS domain name for the public PTR record. You can set this field only - if the setPublicPtr field is enabled. - networkTier: - type: string - description: | - This signifies the networking tier used for configuring this access configuration - and can only take the following values: PREMIUM, STANDARD. - - If an AccessConfig is specified without a valid external IP address, an - ephemeral IP will be created with this networkTier. - - If an AccessConfig with a valid external IP address is specified, it must match - that of the networkTier associated with the Address resource owning that IP. - enum: - - STANDARD - - PREMIUM - natIP: - $ref: '#/definitions/natIP' - disks: - type: array - uniqueItems: true - description: | - Array of disks associated with this instance. Persistent disks must be created before you can assign them. - items: - type: object - additionalProperties: false - oneOf: - - required: - - source - - required: - - initializeParams - - allOf: - - not: - required: - - source - - not: - required: - - initializeParams - properties: - type: - type: string - description: | - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. 
- enum: - - SCRATCH - - PERSISTENT - mode: - type: string - description: | - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. - If not specified, the default is to attach the disk in READ_WRITE mode. - enum: - - READ_WRITE - - READ_ONLY - source: - type: string - description: | - Specifies a valid partial or full URL to an existing Persistent Disk resource. - When creating a new instance, one of initializeParams.sourceImage or - disks.source is required except for local SSD. - - If desired, you can also attach existing non-root persistent disks using this property. - This field is only applicable for persistent disks. - - Note that for InstanceTemplate, specify the disk name, not the URL for the disk. - - Authorization requires one or more of the following Google IAM permissions on the specified resource source: - - compute.disks.use - compute.disks.useReadOnly - deviceName: - type: string - description: | - Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* - tree of a Linux operating system running within the instance. This name can be used to reference - the device for mounting, resizing, and so on, from within the instance. - - If not specified, the server chooses a default device name to apply to this disk, in the - form persistent-disk-x, where x is a number assigned by Google Compute Engine. - This field is only applicable for persistent disks. - boot: - type: boolean - description: | - Indicates that this is a boot disk. The virtual machine will use the first partition - of the disk for its root filesystem. - initializeParams: - type: object - additionalProperties: false - description: | - Specifies the parameters for a new disk that will be created alongside the new instance. - Use initialization parameters to create boot disks or local SSDs attached to the new instance. - - This property is mutually exclusive with the source property; you can only define one or the other, but not both. 
- properties: - labels: - type: object - description: | - Labels to apply to this disk. These can be later modified by the disks.setLabels method. - This field is only applicable for persistent disks. - - An object containing a list of "key": value pairs. - Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - - Authorization requires the following Google IAM permission on the specified resource labels: - - compute.disks.setLabels - diskName: - type: string - description: | - Specifies the disk name. If not specified, the default is to use the name of the instance. - If the disk with the instance name exists already in the given zone/region, - a new name will be automatically generated. - sourceImage: - type: string - description: | - The source image to create this disk. When creating a new instance, one of - initializeParams.sourceImage or disks.source is required except for local SSD. - - To create a disk with one of the public operating system images, specify the image by its family name. - For example, specify family/debian-9 to use the latest Debian 9 image: - - projects/debian-cloud/global/images/family/debian-9 - - Alternatively, use a specific version of a public operating system image: - - projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD - - To create a disk with a custom image that you created, specify the image name in the following format: - - global/images/my-custom-image - - You can also specify a custom image by its image family, which returns the latest version of the - image in that family. Replace the image name with family/family-name: - - global/images/family/my-image-family - - If the source image is deleted later, this field will not be set. - - Authorization requires the following Google IAM permission on the specified resource sourceImage: - - compute.images.useReadOnly - description: - type: string - description: | - An optional description. Provide this property when creating the disk. 
- diskSizeGb: - type: number - description: | - Specifies the size of the disk in base-2 GB. - diskType: - type: string - description: | - Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, - specified using the full URL. For example: - - https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard - - Other values include pd-ssd and local-ssd. If you define this field, you can provide either the full - or partial URL. For example, the following are valid values: - - https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType - Note that for InstanceTemplate, this is the name of the disk type, not URL. - enum: - - pd-standard - - pd-ssd - - local-ssd - sourceImageEncryptionKey: - type: object - additionalProperties: false - description: | - The customer-supplied encryption key of the source image. Required if the source image is - protected by a customer-supplied encryption key. - - Instance templates do not store customer-supplied encryption keys, so you cannot create disks - for instances in a managed instance group if the source images are encrypted with your own keys. - properties: - rawKey: - type: string - description: | - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 - to either encrypt or decrypt this resource. - kmsKeyName: - type: string - description: | - The name of the encryption key that is stored in Google Cloud KMS. - sourceSnapshot: - type: string - description: | - The source snapshot to create this disk. When creating a new instance, one of - initializeParams.sourceSnapshot or disks.source is required except for local SSD. - - To create a disk with a snapshot that you created, specify the snapshot name in the following format: - - global/snapshots/my-backup - - If the source snapshot is deleted later, this field will not be set. 
- - Authorization requires the following Google IAM permission on the specified resource sourceSnapshot: - - compute.snapshots.useReadOnly - sourceSnapshotEncryptionKey: - type: object - additionalProperties: false - description: | - The customer-supplied encryption key of the source snapshot. - properties: - rawKey: - type: string - description: | - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 - to either encrypt or decrypt this resource. - kmsKeyName: - type: string - description: | - The name of the encryption key that is stored in Google Cloud KMS. - autoDelete: - type: boolean - description: | - Specifies whether the disk will be auto-deleted when the instance is deleted - (but not when the disk is detached from the instance). - interface: - type: string - description: | - Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. - The default is SCSI. Persistent disks must always use SCSI and the request will fail if you - attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. - For performance characteristics of SCSI over NVMe, see Local SSD performance. - enum: - - SCSI - - NVME - guestOsFeatures: - type: array - uniqueItems: true - description: | - A list of features to enable on the guest operating system. Applicable only for bootable images. - Read Enabling guest operating system features to see a list of available options. - items: - type: object - additionalProperties: false - properties: - type: - type: string - description: | - https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features - The ID of a supported feature. Read Enabling guest operating system features - to see a list of available options. 
- enum: - - MULTI_IP_SUBNET - - SECURE_BOOT - - UEFI_COMPATIBLE - - VIRTIO_SCSI_MULTIQUEUE - - WINDOWS - diskEncryptionKey: - type: object - additionalProperties: false - description: | - The customer-supplied encryption key of the source snapshot. - properties: - rawKey: - type: string - description: | - Encrypts or decrypts a disk using a customer-supplied encryption key. - - If you are creating a new disk, this field encrypts the new disk using an encryption - key that you provide. If you are attaching an existing disk that is already encrypted, - this field decrypts the disk using the customer-supplied encryption key. - - If you encrypt a disk using a customer-supplied key, you must provide the same key again when - you attempt to use this resource at a later time. For example, you must provide the key when - you create a snapshot or an image from the disk or when you attach the disk - to a virtual machine instance. - - If you do not provide an encryption key, then the disk will be encrypted using an automatically - generated key and you do not need to provide a key to use the disk later. - - Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys - to encrypt disks in a managed instance group. - kmsKeyName: - type: string - description: | - The name of the encryption key that is stored in Google Cloud KMS. - machineType: - type: string - default: n1-standard-1 - description: | - The Compute Instance type; e.g., 'n1-standard-1'. - See https://cloud.google.com/compute/docs/machine-types for details. - scheduling: - type: object - additionalProperties: false - description: | - Sets the scheduling options for this instance. - properties: - onHostMaintenance: - type: string - description: | - Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. - For preemptible instances, the default and only possible behavior is TERMINATE. 
- For more information, see Setting Instance Scheduling Options. - enum: - - MIGRATE - - TERMINATE - automaticRestart: - type: boolean - description: | - Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine - (not terminated by a user). You can only set the automatic restart option for standard instances. - Preemptible instances cannot be automatically restarted. - - By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. - preemptible: - type: boolean - description: | - Defines whether the instance is preemptible. This can only be set during instance creation, - it cannot be set or changed after the instance has been created. - nodeAffinities: - type: array - uniqueItems: true - description: | - A set of node affinity and anti-affinity. - items: - type: object - additionalProperties: false - properties: - key: - type: string - description: | - Corresponds to the label key of Node resource. - operator: - type: string - description: | - Defines the operation of node selection. - values: - type: array - uniqueItems: true - description: | - Corresponds to the label values of Node resource. - items: - type: string - minCpuPlatform: - type: string - description: | - Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms, - such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". - enum: - - Intel Sandy Bridge - - Intel Ivy Bridge - - Intel Haswell - - Intel Broadwell - - Intel Skylake - sourceInstance: - type: string - description: | - The source instance used to create the template. You can provide this as a partial or full URL to the resource. 
- For example, the following are valid values: - - - https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance - - projects/project/zones/zone/instances/instance - - Authorization requires the following Google IAM permission on the specified resource sourceInstance: - - compute.instances.get - sourceInstanceParams: - type: object - additionalProperties: false - description: | - The source instance params to use to create this instance template. - properties: - diskConfigs: - type: array - uniqueItems: true - description: | - Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, - new custom images will be created from each disk. For read-only disks, they will be attached - in read-only mode. Local SSD disks will be created as blank volumes. - items: - type: object - additionalProperties: false - properties: - deviceName: - type: string - description: | - Specifies the device name of the disk to which the configurations apply to. - instantiateFrom: - type: string - description: | - Specifies whether to include the disk and what image to use. Possible values are: - - - source-image: to use the same image that was used to create the source instance's corresponding disk. - Applicable to the boot disk and additional read-write disks. - - source-image-family: to use the same image family that was used to create the source instance's - corresponding disk. Applicable to the boot disk and additional read-write disks. - - custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and - additional read-write disks. - - attach-read-only: to attach a read-only disk. Applicable to read-only disks. - - do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, - local SSDs, and read-only disks. 
- enum: - - source-image - - source-image-family - - custom-image - - attach-read-only - autoDelete: - type: boolean - description: | - Specifies whether the disk will be auto-deleted when the instance is deleted - (but not when the disk is detached from the instance). - customImage: - type: string - description: | - The custom source image to be used to restore this disk when instantiating this instance template.. - shieldedInstanceConfig: - type: object - additionalProperties: false - properties: - enableSecureBoot: - type: boolean - description: | - Defines whether the instance has Secure Boot enabled. - enableVtpm: - type: boolean - description: | - Defines whether the instance has the vTPM enabled. - enableIntegrityMonitoring: - type: boolean - description: | - Defines whether the instance has integrity monitoring enabled. - guestAccelerators: - type: array - uniqueItems: true - description: | - A list of the type and count of accelerator cards attached to the instance. - items: - type: object - additionalProperties: false - properties: - acceleratorType: - type: string - description: | - Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 - If you are creating an instance template, specify only the accelerator name. - See GPUs on Compute Engine for a full list of accelerator types. - acceleratorCount: - type: integer - description: | - The number of the guest accelerator cards exposed to this instance. - canIpForward: - type: boolean - description: | - Defines whether the instance is allowed to send and receive packets - with non-matching destination or source IPs. - diskType: - type: string - default: pd-standard - enum: - - pd-ssd - - pd-standard - - local-ssd - description: Boot disk type - diskImage: - type: string - description: | - The source image to create the disk. 
To create the disk with one of the - public operating system images, specify the image by its family name. - For example, specify family/debian-9 to use the latest Debian 9 image, - projects/debian-cloud/global/images/family/debian-9. - To create a disk with a custom image, specify the image - name in the global/images/my-custom-image format. - See https://cloud.google.com/compute/docs/images for details. - diskSizeGb: - type: integer - minimum: 10 - metadata: - type: object - additionalProperties: false - description: | - Instance metadata. - For example: - metadata: - items: - - key: startup-script - - value: sudo apt-get update - properties: - items: - type: array - uniqueItems: true - description: The metadata key-value pairs. - items: - type: object - additionalProperties: false - required: - - key - - value - properties: - key: - type: string - value: - type: string - serviceAccounts: - type: array - uniqueItems: true - description: | - The list of service accounts, with their specified scopes, authorized for - this instance. Only one service account per VM instance is supported. - items: - type: object - additionalProperties: false - properties: - email: - type: string - description: The email address of the service account. - scopes: - type: array - uniqueItems: true - description: | - The list of scopes to be made available to the service account. - items: - type: string - description: | - The access scope; - e.g., 'https://www.googleapis.com/auth/compute.readonly'. - See https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam - for details - tags: - type: object - additionalProperties: false - description: | - The list of tags to apply to the instances that are created from the - template. The tags identify valid sources or targets for network - firewalls. - properties: - items: - type: array - uniqueItems: true - description: The array of tags. 
- items: - type: string - labels: - type: object - description: | - The labels to apply to instances created from the template. - Example: - name: wrench - mass: 1.3kg - count: 3 - -outputs: - name: - type: string - description: The instance template name. - selfLink: - type: string - description: The URI (SelfLink) of the instance template resource. - -documentation: - - templates/instance_template/README.md - -examples: - - templates/instance_template/examples/instance_template.yaml diff --git a/dm/templates/instance_template/tests/integration/instance_template.bats b/dm/templates/instance_template/tests/integration/instance_template.bats deleted file mode 100755 index c459295ca6b..00000000000 --- a/dm/templates/instance_template/tests/integration/instance_template.bats +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export IMAGE="projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - envsubst < "templates/instance_template/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying instance template disk properties" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.disks[0].initializeParams)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "diskType: pd-ssd" ]] - [[ "$output" =~ "sourceImage: ${IMAGE}" ]] - [[ "$output" =~ "diskSizeGb: '50'" ]] -} - -@test "Verifying instance spec properties" { - run gcloud compute instance-templates describe it-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "machineType: f1-micro" ]] - [[ "$output" =~ "description: Instance description" ]] - [[ "$output" =~ "canIpForward: true" ]] -} - -@test "Verifying instance template properties" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "value(name, description, properties.labels)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "Template description" ]] - [[ 
"$output" =~ "it-${RAND}" ]] - [[ "$output" =~ "name=wrench" ]] -} - -@test "Verifying instance template network tags" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.tags)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "ftp" ]] - [[ "$output" =~ "https" ]] -} - -@test "Verifying instance template metadata" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.metadata)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "key: createdBy" ]] - [[ "$output" =~ "value: unitTest" ]] -} - -@test "Verifying instance template network properties" { - NET="https://www.googleapis.com/compute/v1/projects/${CLOUD_FOUNDATION_PROJECT_ID}/global/networks/test-network-${RAND}" - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.networkInterfaces[0])" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "type: ONE_TO_ONE_NAT" ]] - [[ "$output" =~ "network: ${NET}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/instance_template/tests/integration/instance_template.yaml b/dm/templates/instance_template/tests/integration/instance_template.yaml deleted file mode 100644 index 7ea71b774d7..00000000000 --- a/dm/templates/instance_template/tests/integration/instance_template.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Test of the Instance Template template. 
-# -# Variables: -# RAND: a random string used by the testing suite -# IMAGE: a URL to the base disk image provided by the testing suite - -imports: - - path: templates/instance_template/instance_template.py - name: instance_template.py - -resources: - - name: instance-template-${RAND} - type: instance_template.py - properties: - name: it-${RAND} - instanceDescription: Instance description - templateDescription: Template description - networks: - - network: $(ref.test-network-${RAND}.selfLink) - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - diskImage: ${IMAGE} - machineType: f1-micro - canIpForward: true - diskType: pd-ssd - diskSizeGb: 50 - tags: - items: - - ftp - - https - metadata: - items: - - key: createdBy - value: unitTest - labels: - name: wrench - - name: test-network-${RAND} - type: compute.v1.network - properties: - autoCreateSubnetworks: true diff --git a/dm/templates/instance_template/tests/integration/instance_template_networks.bats b/dm/templates/instance_template/tests/integration/instance_template_networks.bats deleted file mode 100755 index a697b865226..00000000000 --- a/dm/templates/instance_template/tests/integration/instance_template_networks.bats +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="$(echo ${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND} | head -c 63)" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export IMAGE="projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - envsubst < "templates/instance_template/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - delete_config - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying instance template disk properties" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.disks[0].initializeParams)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "diskType: pd-ssd" ]] - [[ "$output" =~ "sourceImage: ${IMAGE}" ]] -# [[ "$output" =~ "diskSizeGb: '50'" ]] -} - -@test "Verifying instance spec properties" { - run gcloud compute instance-templates describe it-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "machineType: f1-micro" ]] - [[ "$output" =~ "description: Instance description" ]] - [[ "$output" =~ "canIpForward: true" ]] -} - -@test "Verifying instance template properties" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "value(name, description, properties.labels)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "Template description" ]] - [[ 
"$output" =~ "it-${RAND}" ]] - [[ "$output" =~ "name=wrench" ]] -} - -@test "Verifying instance template network tags" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.tags)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "ftp" ]] - [[ "$output" =~ "https" ]] -} - -@test "Verifying instance template metadata" { - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.metadata)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "key: createdBy" ]] - [[ "$output" =~ "value: unitTest" ]] -} - -@test "Verifying instance template first network properties" { - NET="https://www.googleapis.com/compute/v1/projects/${CLOUD_FOUNDATION_PROJECT_ID}/global/networks/test-network-0-${RAND}" - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.networkInterfaces[0])" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "name: External NAT" ]] - [[ "$output" =~ "type: ONE_TO_ONE_NAT" ]] - [[ "$output" =~ "network: ${NET}" ]] -} - -@test "Verifying instance template second network properties" { - NET="https://www.googleapis.com/compute/v1/projects/${CLOUD_FOUNDATION_PROJECT_ID}/global/networks/test-network-1-${RAND}" - run gcloud compute instance-templates describe it-${RAND} \ - --format "yaml(properties.networkInterfaces[1])" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "name: External NAT" ]] - [[ "$output" =~ "type: ONE_TO_ONE_NAT" ]] - [[ "$output" =~ "network: ${NET}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/instance_template/tests/integration/instance_template_networks.yaml 
b/dm/templates/instance_template/tests/integration/instance_template_networks.yaml deleted file mode 100644 index 5974fd46f01..00000000000 --- a/dm/templates/instance_template/tests/integration/instance_template_networks.yaml +++ /dev/null @@ -1,63 +0,0 @@ -# Test of the Instance Template template. -# -# Variables: -# RAND: a random string used by the testing suite -# IMAGE: a URL to the base disk image provided by the testing suite - -imports: - - path: templates/instance_template/instance_template.py - name: instance_template.py - -resources: - - name: it-${RAND} - type: instance_template.py - properties: - name: it-${RAND} - instanceDescription: Instance description - templateDescription: Template description - networks: - - network: $(ref.test-network-0-${RAND}.selfLink) - subnetwork: $(ref.test-subnetwork-0-${RAND}.selfLink) - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - - network: $(ref.test-network-1-${RAND}.selfLink) - subnetwork: $(ref.test-subnetwork-1-${RAND}.selfLink) - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - diskImage: ${IMAGE} - machineType: f1-micro - canIpForward: true - diskType: pd-ssd - diskSizeGb: 50 - tags: - items: - - ftp - - https - metadata: - items: - - key: createdBy - value: unitTest - labels: - name: wrench - - name: test-network-0-${RAND} - type: compute.v1.network - properties: - autoCreateSubnetworks: false - - name: test-network-1-${RAND} - type: compute.v1.network - properties: - autoCreateSubnetworks: false - - name: test-subnetwork-0-${RAND} - type: compute.v1.subnetwork - properties: - network: $(ref.test-network-0-${RAND}.selfLink) - ipCidrRange: 10.0.1.0/24 - region: us-central1 - - name: test-subnetwork-1-${RAND} - type: compute.v1.subnetwork - properties: - network: $(ref.test-network-1-${RAND}.selfLink) - ipCidrRange: 10.0.2.0/24 - region: us-central1 diff --git a/dm/templates/interconnect/README.md b/dm/templates/interconnect/README.md deleted file mode 100644 index 
75ff94816b6..00000000000 --- a/dm/templates/interconnect/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# Interconnect - -This template creates an Interconnect resource. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [network](../network/README.md) -- Create a [interconnect] (../interconnect/README.md) -- Create a [cloud_router] (../cloud_router/README.md) -- Grant the [compute.networkAdmin](https://cloud.google.com/compute/docs/access/iam) - IAM role to the project service account - -## Deployment - -### Resources - -- [compute.v1.interconnects](https://cloud.google.com/compute/docs/reference/rest/v1/interconnects) - -### Properties - -See the `properties` section in the schema file(s): - -- [Interconnect](interconnect.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, - [examples/interconnect_dedicated.yaml](examples/interconnect_dedicated.yaml): - -```shell - cp templates/interconnect/examples/interconnect_dedicated.yaml my_interconnect_dedicated.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for - properties, refer to the schema files listed above): - -```shell - vim my_interconnect_dedicated.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace \ with the relevant - deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_interconnect_dedicated.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Dedicated Interconnect](examples/interconnect_dedicated.yaml) diff --git a/dm/templates/interconnect/examples/interconnect_dedicated.yaml b/dm/templates/interconnect/examples/interconnect_dedicated.yaml deleted file mode 100644 index 8f14a95d20e..00000000000 --- a/dm/templates/interconnect/examples/interconnect_dedicated.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Example of the Interconnect template usage. -# -# This example creates a DEDICATED Interconnect resource. -# -# Replace the following placeholders with appropriate values: -# : the Interconnect name -# : the name of the customer authorized for crossconnect -# : the URL of the Interconnect location -# : the amount of circuits for the Interconnect -# -# For details, refer to -# https://cloud.google.com/compute/docs/reference/rest/v1/interconnects. - -imports: - - path: templates/interconnect/interconnect.py - name: interconnect.py - -resources: - - name: my_dedicated_interconnect - type: interconnect.py - properties: - name: - customerName: - interconnectType: DEDICATED - location: - requestedLinkCount: diff --git a/dm/templates/interconnect/interconnect.py b/dm/templates/interconnect/interconnect.py deleted file mode 100644 index befca6d536d..00000000000 --- a/dm/templates/interconnect/interconnect.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -""" This template creates an Interconnect resource. """ - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - resources = [] - intercon = { - 'name': context.env['name'], - # https://cloud.google.com/compute/docs/reference/rest/v1/interconnects - 'type': 'gcp-types/compute-v1:interconnects', - 'properties': - { - 'project': project_id, - 'name': name, - 'customerName': - context.properties['customerName'], - 'interconnectType': - context.properties['interconnectType'], - 'location': - context.properties['location'], - 'requestedLinkCount': - context.properties['requestedLinkCount'] - } - } - - optional_props = [ - 'adminEnabled', - 'description', - 'linkType', - 'nocContactEmail' - ] - - for prop in optional_props: - if prop in context.properties: - intercon['properties'][prop] = context.properties[prop] - - resources.append(intercon) - - return { - 'resources': - resources, - 'outputs': - [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - } - ] - } diff --git a/dm/templates/interconnect/interconnect.py.schema b/dm/templates/interconnect/interconnect.py.schema deleted file mode 100644 index 50d0cb6ab08..00000000000 --- a/dm/templates/interconnect/interconnect.py.schema +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Interconnect (Dedicated) - author: Sourced Group Inc. - version: 1.0.0 - description: | - Supports creation of an Interconnect resource. - For more information on this resource: - https://cloud.google.com/compute/docs/reference/rest/v1/interconnects. - - APIs endpoints used by this template: - - gcp-types/compute-v1:interconnects => - https://cloud.google.com/compute/docs/reference/rest/v1/interconnects - -additionalProperties: false - -required: - - name - - customerName - - location - - requestedLinkCount - -properties: - name: - type: string - description: The name of the Interconnect resource. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the instance. - adminEnabled: - type: boolean - description: | - The administrative status of the Interconnect. If True, the - Interconnect is functional and can carry traffic. If False, no - packets can be carried over the Interconnect, and no BGP routes are - exchanged over it. By default, True. - customerName: - type: string - description: | - The customer name to be put in the Letter of Authorization as the party - authorized to request a crossconnect. - description: - type: string - description: | - The optional description of the resource. Provide a value when you - create the resource. - interconnectType: - type: string - description: The Interconnect type. - enum: - - DEDICATED - - PARTNER - linkType: - type: string - description: | - The type of the requested link. 
Indicates the speed of each of the - links in the bundle, not of the entire bundle. Only 10G per link is - allowed for a dedicated Interconnect. Options: Ethernet_10G_LR. - enum: - - LINK_TYPE_ETHERNET_10G_LR - - LINK_TYPE_ETHERNET_100G_LR - requestedLinkCount: - type: number - description: | - Target number of physical links in the link bundle, as requested by the customer. - location: - type: string - description: | - The URL of the InterconnectLocation object that defines where the - connection is to be provisioned. - nocContactEmail: - type: string - description: | - The email address to contact the customer NOC for operations and - maintenance notifications regarding the Interconnect. If specified, - this is used for notifications in addition to all other forms described, - such as Stackdriver log alerting and Cloud Notifications. - -outputs: - name: - type: string - description: The Interconnect name. - selfLink: - type: string - description: The server-defined URL for the resource. - -documentation: - - templates/interconnect/README.md - -examples: - - templates/interconnect/examples/interconnect_dedicated.yaml diff --git a/dm/templates/interconnect_attachment/README.md b/dm/templates/interconnect_attachment/README.md deleted file mode 100644 index 1d38153cefe..00000000000 --- a/dm/templates/interconnect_attachment/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# Interconnect Attachment - -This template creates an Interconnect Attachment. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [network](../network/README.md) -- Create a [interconnect] (../interconnect/README.md) -- Create a [cloud_router] (../cloud_router/README.md) -- Grant the [compute.networkAdmin](https://cloud.google.com/compute/docs/access/iam) i - IAM role to the project service account - -## Deployment - -### Resources - -- [compute.v1.interconnectAttachments](https://cloud.google.com/compute/docs/reference/rest/v1/interconnectAttachments) - -### Properties - -See the `properties` section in the schema file(s): - -- [Interconnect Attachment](interconnect_attachment.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/interconnect_attachment.yaml](examples/interconnect_attachment.yaml): - -```shell - cp templates/interconnect_attachment/examples/interconnect_attachment.yaml my_interconnect_attachment.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for - properties, refer to the schema files listed above): - -```shell - vim my_interconnect_attachment.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace \ with the relevant - deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_interconnect_attachment.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Dedicated Interconnect Attachment](examples/interconnect_attachment_dedicated.yaml) -- [Partner Interconnect Attachment](examples/interconnect_attachment_partner.yaml) diff --git a/dm/templates/interconnect_attachment/examples/interconnect_attachment_dedicated.yaml b/dm/templates/interconnect_attachment/examples/interconnect_attachment_dedicated.yaml deleted file mode 100644 index 83ea23096f1..00000000000 --- a/dm/templates/interconnect_attachment/examples/interconnect_attachment_dedicated.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Example of the Interconnect Attachment template usage. -# -# This example creates an interconnect attachment on a DEDICATED Interconnect -# -# Replace the following with appropriate values -# : replace with the name of the attachment -# : replace with the url of the cloud router. -# : replace with the region that the cloud router is in. -# : DEDICATED Circuits only replace with the url -# of the interconnect -# Refer to -# https://cloud.google.com/compute/docs/reference/rest/v1/interconnectAttachments -# for more details - -imports: - - path: templates/interconnect_attachment/interconnect_attachment.py - name: interconnect_attachment.py - -resources: - - name: my_dedicated_interconnect_attachment - type: interconnect_attachment.py - properties: - name: - router: - region: - interconnect: - type: DEDICATED diff --git a/dm/templates/interconnect_attachment/examples/interconnect_attachment_partner.yaml b/dm/templates/interconnect_attachment/examples/interconnect_attachment_partner.yaml deleted file mode 100644 index 7848947c31a..00000000000 --- a/dm/templates/interconnect_attachment/examples/interconnect_attachment_partner.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Example of the Interconnect Attachment template usage. 
-# -# This example creates an attachment on a PARTNER Interconnect -# -# Replace the following with appropriate values -# : replace with the name of the attachment -# : replace with the url of the cloud router. -# : replace with the region that the cloud router is in. -# : PARTNER Inertonnects only replace with the -# edge availibility domain -# Refer to -# https://cloud.google.com/compute/docs/reference/rest/v1/interconnectAttachments -# for more details - -imports: - - path: templates/interconnect_attachment/interconnect_attachment.py - name: interconnect_attachment.py - -resources: - - name: my_partner_interconnect_attachment - type: interconnect_attachment.py - properties: - name: - router: - region: - edgeAvailabilityDomain: - type: PARTNER diff --git a/dm/templates/interconnect_attachment/interconnect_attachment.py b/dm/templates/interconnect_attachment/interconnect_attachment.py deleted file mode 100644 index 593cf820190..00000000000 --- a/dm/templates/interconnect_attachment/interconnect_attachment.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" This template creates an Interconnect Attachment. """ - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - resources = [] - attach = { - 'name': context.env['name'], - # https://cloud.google.com/compute/docs/reference/rest/v1/interconnectAttachments - 'type': 'gcp-types/compute-v1:interconnectAttachments', - 'properties': - { - 'project': project_id, - 'name': name, - 'router': - context.properties['router'], - 'region': - context.properties['region'], - 'type': - context.properties['type'] - } - } - - optional_props = [ - 'adminEnabled', - 'bandwidth', - 'candidateSubnets', - 'description', - 'edgeAvailabilityDomain', - 'interconnect', - 'partnerAsn', - 'partnerMetadata', - 'vlanTag8021q', - ] - - for prop in optional_props: - if prop in context.properties: - attach['properties'][prop] = context.properties[prop] - - resources.append(attach) - - return { - 'resources': - resources, - 'outputs': - [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - } - ] - } diff --git a/dm/templates/interconnect_attachment/interconnect_attachment.py.schema b/dm/templates/interconnect_attachment/interconnect_attachment.py.schema deleted file mode 100644 index 7948d4a2dcf..00000000000 --- a/dm/templates/interconnect_attachment/interconnect_attachment.py.schema +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Interconnect Attachment - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates an Interconnect Attachment. - - For more information on this resource: - https://cloud.google.com/interconnect/docs/how-to/dedicated/creating-vlan-attachments (Dedicated) - https://cloud.google.com/interconnect/docs/how-to/partner/creating-vlan-attachments (Partner) - - APIs endpoints used by this template: - - gcp-types/compute-v1:interconnectAttachments => - https://cloud.google.com/compute/docs/reference/rest/v1/interconnectAttachments - -additionalProperties: false - -required: - - router - - region - - type - -oneOf: - - allOf: - - properties: - type: - enum: ["PARTNER"] - - not: - required: - - pairingKey - - not: - required: - - bandwidth - - not: - required: - - partnerMetadata - - not: - required: - - partnerAsn - - allOf: - - properties: - type: - enum: ["PARTNER_PROVIDER"] - - not: - required: - - adminEnabled - - not: - required: - - edgeAvailabilityDomain - - allOf: - - properties: - type: - enum: ["DEDICATED"] - - not: - required: - - pairingKey - - not: - required: - - edgeAvailabilityDomain - - not: - required: - - partnerAsn - -properties: - name: - type: string - description: | - The name of the Interconnect Attachment resource. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the instance. - router: - type: string - description: | - URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this - InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the - network & region within which the Cloud Router is configured. 
- - Authorization requires the following Google IAM permission on the specified resource router: - - compute.routers.use - region: - type: string - description: | - The URL of the region where the router resides. - pairingKey: - type: string - description: | - The opaque identifier of an PARTNER attachment used to initiate provisioning with a selected partner. - Of the form "XXXXX/region/domain" - type: - type: string - description: | - The type of interconnect attachment this is. - enum: - - DEDICATED - - PARTNER - - PARTNER_PROVIDER - bandwidth: - type: string - description: | - Provisioned bandwidth capacity for the interconnect attachment. For attachments of type DEDICATED, - the user can set the bandwidth. For attachments of type PARTNER, the Google Partner that is operating - the interconnect must set the bandwidth. - Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED. - enum: - - BPS_50M - - BPS_100M - - BPS_200M - - BPS_300M - - BPS_400M - - BPS_500M - - BPS_1G - - BPS_2G - - BPS_5G - - BPS_10G - adminEnabled: - type: boolean - description: | - Determines whether this Attachment will carry packets. Not present for PARTNER_PROVIDER. - partnerMetadata: - type: object - additionalProperties: false - description: | - Informational metadata about Partner attachments from Partners to display to customers. - Output only for for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED. - properties: - partnerName: - type: string - description: | - Plain text name of the Partner providing this attachment. - This value may be validated to match approved Partner values. - interconnectName: - type: string - description: | - Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner’s portal. - For instance "Chicago 1". This value may be validated to match approved Partner values. - portalUrl: - type: string - description: | - URL of the Partner’s portal for this Attachment. 
Partners may customise this to be a deep link to the - specific resource on the Partner portal. This value may be validated to match approved Partner values. - vlanTag8021q: - type: number - description: | - The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. Only specified at creation time. - interconnect: - type: string - description: | - URL of the underlying Interconnect object that this attachment's traffic - will traverse through. - - Authorization requires the following Google IAM permission on the specified resource interconnect: - - compute.interconnects.use - partnerAsn: - type: string - description: | - Optional BGP ASN for the router supplied by a Layer 3 Partner if they configured BGP on behalf of the customer. - Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED. - candidateSubnets: - type: array - uniqItems: True - description: | - Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress and - customerRouterIpAddress for this attachment. All prefixes must be within link-local address space (169.254.0.0/16) - and must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 from the supplied - candidate prefix(es). The request will fail if all possible /29s are in use on Google’s edge. - If not supplied, Google will randomly select an unused /29 from all of link-local space. - maxItems: 16 - items: - type: string - edgeAvailabilityDomain: - type: string - description: | - Desired availability domain for the attachment. Only available for type - PARTNER, at creation time. - - For improved reliability, customers should configure a pair of attachments, one per availability domain. - The selected availability domain will be provided to the Partner via the pairing key, so that the provisioned - circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY. 
- enum: - - AVAILABILITY_DOMAIN_1 - - AVAILABILITY_DOMAIN_2 - - AVAILABILITY_DOMAIN_ANY - -outputs: - name: - type: string - description: The created attachments name. - selfLink: - type: string - description: Server-defined URL for the resource. - -documentation: - - templates/interconnect_attachment/README.md - -examples: - - templates/interconnect_attachment/examples/interconnect_attachment_dedicated.yaml - - templates/interconnect_attachment/examples/interconnect_attachment_partner.yaml diff --git a/dm/templates/internal_load_balancer/README.md b/dm/templates/internal_load_balancer/README.md deleted file mode 100644 index 1829c9b2341..00000000000 --- a/dm/templates/internal_load_balancer/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# Internal Load Balancer - -This template creates an internal load balancer that consists of a forwarding -rule and a regional backend service. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.admin](https://cloud.google.com/compute/docs/access/iam) - IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [compute.v1.forwardingRule](https://cloud.google.com/compute/docs/reference/latest/forwardingRules) -- [compute.v1.regionalBackendService](https://cloud.google.com/compute/docs/reference/latest/regionBackendServices) - -### Properties - -See the `properties` section in the schema file(s): - -- [Internal Load Balancer](internal_load_balancer.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. 
Copy the example DM config to be used as a model for the deployment; in this - case, [examples/internl\_load\_balancer.yaml](examples/internal_load_balancer.yaml): - -```shell - cp templates/internal_load_balancer/examples/internal_load_balancer.yaml \ - my_internal_load_balancer.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for - properties, refer to the schema files listed above): - -```shell - vim my_internal_load_balancer.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant - deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_internal_load_balancer.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Internal Load Balancer](examples/internal_load_balancer.yaml) diff --git a/dm/templates/internal_load_balancer/examples/internal_load_balancer.yaml b/dm/templates/internal_load_balancer/examples/internal_load_balancer.yaml deleted file mode 100644 index 3bbcb64cedb..00000000000 --- a/dm/templates/internal_load_balancer/examples/internal_load_balancer.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Example of the internal load balancer template usage. 
-# -# Replace the following placeholders with valid values: -# : a region where load balancer resides -# : a URL of the regional instance group -# : a URL of the TCP healthcheck -# : a port number to which the traffic is delivered - -imports: - - path: templates/internal_load_balancer/internal_load_balancer.py - name: internal_load_balancer.py - -resources: - - name: internal-load-balancer - type: internal_load_balancer.py - properties: - region: - protocol: TCP - ports: - - - backendService: - healthCheck: - sessionAffinity: CLIENT_IP - backends: - - group: - type: internal_load_balancer.py diff --git a/dm/templates/internal_load_balancer/internal_load_balancer.py b/dm/templates/internal_load_balancer/internal_load_balancer.py deleted file mode 100644 index efc7290fcea..00000000000 --- a/dm/templates/internal_load_balancer/internal_load_balancer.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates an internal load balancer. """ - - -def set_optional_property(destination, source, prop_name): - """ Copies the property value if present. """ - - if prop_name in source: - destination[prop_name] = source[prop_name] - - -def get_backend_service(properties, project_id, res_name): - """ Creates the backend service. 
""" - - backend_spec = properties['backendService'] - for backend in backend_spec['backends']: - backend.update({ - 'balancingMode': 'CONNECTION' - }) - - name = '{}-bs'.format(res_name) - backend_properties = { - 'name': backend_spec.get('name', properties.get('name', name)), - 'project': project_id, - 'loadBalancingScheme': 'INTERNAL', - 'protocol': properties['protocol'], - 'region': properties['region'], - } - - backend_resource = { - 'name': name, - 'type': 'backend_service.py', - 'properties': backend_properties - } - - optional_properties = [ - 'description', - 'backends', - 'timeoutSec', - 'sessionAffinity', - 'connectionDraining', - 'backends', - 'healthCheck', - 'healthChecks', - ] - - for prop in optional_properties: - set_optional_property(backend_properties, backend_spec, prop) - - return [backend_resource], [ - { - 'name': 'backendServiceName', - 'value': backend_resource['properties']['name'], - }, - { - 'name': 'backendServiceSelfLink', - 'value': '$(ref.{}.selfLink)'.format(name), - }, - ] - - -def get_forwarding_rule(properties, backend, project_id, res_name): - """ Creates the forwarding rule. 
""" - - rule_properties = { - 'name': properties.get('name', res_name), - 'project': project_id, - 'loadBalancingScheme': 'INTERNAL', - 'IPProtocol': properties['protocol'], - 'backendService': '$(ref.{}.selfLink)'.format(backend['name']), - 'region': properties['region'], - } - - rule_resource = { - 'name': res_name, - 'type': 'forwarding_rule.py', - 'properties': rule_properties, - } - - optional_properties = [ - 'description', - 'IPAddress', - 'ipVersion', - 'ports', - 'network', - 'subnetwork', - ] - - for prop in optional_properties: - set_optional_property(rule_properties, properties, prop) - - return [rule_resource], [ - { - 'name': 'forwardingRuleName', - 'value': res_name, - }, - { - 'name': 'forwardingRuleSelfLink', - 'value': '$(ref.{}.selfLink)'.format(res_name), - }, - { - 'name': 'IPAddress', - 'value': '$(ref.{}.IPAddress)'.format(res_name), - }, - { - 'name': 'region', - 'value': properties['region'] - }, - ] - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - project_id = properties.get('project', context.env['project']) - - backend_resources, backend_outputs = get_backend_service(properties, project_id, context.env['name']) - rule_resources, rule_outputs = get_forwarding_rule( - properties, - backend_resources[0], - project_id, - context.env['name'] - ) - - return { - 'resources': rule_resources + backend_resources, - 'outputs': rule_outputs + backend_outputs - } diff --git a/dm/templates/internal_load_balancer/internal_load_balancer.py.schema b/dm/templates/internal_load_balancer/internal_load_balancer.py.schema deleted file mode 100644 index 2906c223050..00000000000 --- a/dm/templates/internal_load_balancer/internal_load_balancer.py.schema +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Internal Load Balancer - version: 1.0.1 - author: Sourced Group Inc. - description: | - Supports the creation of an internal load balancing solution that consists - of a forwarding rule and a backend service. For details, visit - https://cloud.google.com/load-balancing/docs/internal/. - -imports: - - path: ../backend_service/backend_service.py - name: backend_service.py - - path: ../forwarding_rule/forwarding_rule.py - name: forwarding_rule.py - -additionalProperties: false - -required: - - region - - backendService - -properties: - name: - type: string - description: | - The internal load balancer name. This name is assigned to the - underlying forwarding rule resource. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing resources. The - Google apps domain is prefixed if applicable. - description: - type: string - description: | - An optional description of the internal load balancer. This description - is assigned to the underlying forwarding rule resource. - region: - type: string - description: | - The name of the region where the internal load balancer resides. - ports: - type: array - uniqItems: true - description: | - The list of ports; only packets addressed to these ports are forwarded - to the backends configured with the load balancer. - items: - type: integer - minimum: 1 - maximum: 65535 - network: - type: string - description: | - The network which the load-balanced IP must belong to. If no value is provided, - the default network is used. 
- If you specify this property, you can specify the network as a full or - partial URL. For example, the following are all valid URLs: - - https://www.googleapis.com/compute/v1/projects/PROJECT/global/networks/NETWORK - - projects/PROJECT/global/networks/NETWORK - - global/networks/default - subnetwork: - type: string - description: | - The subnetwork which the load-balanced IP must belong to. - If you specify this property, you can specify the subnetwork as a full or - partial URL. For example, the following are all valid URLs: - - https://www.googleapis.com/compute/v1/projects/PROJECT/regions/REGION/subnetworks/SUBNETWORK - - regions/REGION/subnetworks/SUBNETWORK - protocol: - type: string - default: TCP - description: The protocol of the traffic the load balancer uses. - enum: - - TCP - - UDP - backendService: - type: object - description: The backend service configuration. - additionalProperties: false - oneOf: - - required: - - healthCheck - - required: - - healthChecks - required: - - backends - properties: - name: - type: string - description: The backend service resource name. - description: - type: string - description: An optional description of the backend service resource. - backends: - type: array - uniqItems: true - description: | - The list of backends (instance groups) to which the backend service - distributes traffic. - items: - type: object - additionalProperties: false - required: - - group - properties: - description: - type: string - description: An optional description of the resource. - group: - type: string - description: | - The fully-qualified URL of the Instance Group resource. The - instance group must reside in the same region as the backend - service. - timeoutSec: - type: number - default: 30 - description: | - The number of seconds to wait for the backend response before - considering the request as failed. - healthCheck: - type: string - description: | - The URL of the HealthCheck resource for healthchecking the backend - service. 
- healthChecks: - type: array - uniqueItems: true - maxItems: 1 - description: | - The URL of the HealthCheck, HttpHealthCheck, or HttpsHealthCheck resource - for healthchecking the backend service. - items: - type: string - sessionAffinity: - type: string - default: NONE - description: | - The type of the session affinity to use. This field is not used with - the UDP protocol. - enum: - - NONE - - CLIENT_IP - - CLIENT_IP_PROTO - - CLIENT_IP_PORT_PROTO - connectionDraining: - type: object - additionalProperties: false - description: The connection draining settings. - properties: - drainingTimeoutSec: - type: integer - description: | - The time period during which the instance is drained (is not - accepting new connections but still procedding the ones accepted - earlier). - -outputs: - forwardingRuleName: - type: string - description: The name of the internal load balancer's forwarding rule. - backendServiceName: - type: string - description: The name of the internal load balancer's backend service. - region: - type: string - description: | - The URL of the region where the internal load balancer resides. - forwardingRuleSelfLink: - type: string - description: The URI (SelfLink) of the forwarding rule resource. - backendServiceSelfLink: - type: string - description: The URI (SelfLink) of the backend service resource. - IPAddress: - type: string - description: | - The IP address on whose behalf the internal load balancer - (the forwarding rule) operates. 
- -documentation: - - templates/internal_load_balancer/README.md - -examples: - - templates/internal_load_balancer/examples/internal_load_balancer.yaml diff --git a/dm/templates/internal_load_balancer/tests/integration/internal_load_balancer.bats b/dm/templates/internal_load_balancer/tests/integration/internal_load_balancer.bats deleted file mode 100755 index 6b7338e059d..00000000000 --- a/dm/templates/internal_load_balancer/tests/integration/internal_load_balancer.bats +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # test specific variables - export REGION="us-east1" - export ILB_RES_NAME="internal-load-balancer-${RAND}" - export ILB_NAME="internal-load-balancer-name-${RAND}" - export ILB_DESCRIPTION="ILB Description" - export PROTOCOL="TCP" - export ILB_PORT="80" - export NETWORK_NAME="test-network-${RAND}" - export BS_NAME="backend-service-name-${RAND}" - export BS_DESCRIPTION="backend description" - export BS_AFFINITY="CLIENT_IP" - export BS_DRAINING="70" - export TIMEOUT="40" - export HC_NAME="tcp-healthcheck-${RAND}" - export BACKEND_DESCRIPTION="instance group description" - export IGM_NAME="regional-igm-${RAND}" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying forwarding rule" { - run gcloud compute forwarding-rules describe "${ILB_NAME}" \ - --region ${REGION} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "IPProtocol: ${PROTOCOL}" ]] - [[ "$output" =~ "${BS_NAME}" ]] - [[ "$output" =~ "loadBalancingScheme: INTERNAL" ]] - [[ "$output" =~ "name: ${ILB_NAME}" ]] - [[ "$output" =~ "- '${ILB_PORT}'" ]] - [[ "$output" =~ "${ILB_DESCRIPTION}" ]] - [[ "$output" =~ "${NETWORK_NAME}" ]] -} - -@test "Verifying backend service" { - run gcloud compute backend-services describe "${BS_NAME}" \ - --region ${REGION} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "description: ${BS_DESCRIPTION}" ]] - [[ "$output" =~ "protocol: ${PROTOCOL}" ]] - [[ "$output" =~ "loadBalancingScheme: INTERNAL" ]] - [[ "$output" =~ "sessionAffinity: ${BS_AFFINITY}" ]] - [[ "$output" =~ "timeoutSec: ${TIMEOUT}" ]] - [[ "$output" =~ "${HC_NAME}" ]] - [[ "$output" =~ "drainingTimeoutSec: ${BS_DRAINING}" ]] -} - -@test "Verifying backend" { - run gcloud compute backend-services describe "${BS_NAME}" \ - --format "yaml(backends[0])" --region ${REGION} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "description: ${BACKEND_DESCRIPTION}" ]] - [[ "$output" =~ "balancingMode: CONNECTION" ]] - [[ "$output" =~ "${IGM_NAME}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/internal_load_balancer/tests/integration/internal_load_balancer.yaml b/dm/templates/internal_load_balancer/tests/integration/internal_load_balancer.yaml 
deleted file mode 100644 index 32a964d5b1a..00000000000 --- a/dm/templates/internal_load_balancer/tests/integration/internal_load_balancer.yaml +++ /dev/null @@ -1,71 +0,0 @@ -# Test of the internal load balancer template. - -imports: - - path: templates/internal_load_balancer/internal_load_balancer.py - name: internal_load_balancer.py - - -resources: - - name: ${ILB_RES_NAME} - type: internal_load_balancer.py - properties: - protocol: ${PROTOCOL} - region: ${REGION} - name: ${ILB_NAME} - description: ${ILB_DESCRIPTION} - ports: - - ${ILB_PORT} - network: $(ref.${NETWORK_NAME}.selfLink) - backendService: - name: ${BS_NAME} - description: ${BS_DESCRIPTION} - timeoutSec: ${TIMEOUT} - healthCheck: $(ref.${HC_NAME}.selfLink) - sessionAffinity: ${BS_AFFINITY} - connectionDraining: - drainingTimeoutSec: ${BS_DRAINING} - backends: - - group: $(ref.${IGM_NAME}.instanceGroup) - description: ${BACKEND_DESCRIPTION} - -# Test prerequisites. - - name: ${IGM_NAME} - type: compute.v1.regionInstanceGroupManager - properties: - instanceTemplate: $(ref.instance-template-${RAND}.selfLink) - region: ${REGION} - targetSize: 3 - - - name: instance-template-${RAND} - type: compute.v1.instanceTemplate - properties: - properties: - machineType: f1-micro - disks: - - autoDelete: true - boot: true - deviceName: boot - initializeParams: - sourceImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - type: PERSISTENT - networkInterfaces: - - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - network: $(ref.test-network-${RAND}.selfLink) - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx - - - name: ${NETWORK_NAME} - type: compute.v1.network - properties: - autoCreateSubnetworks: true - - - name: ${HC_NAME} - type: compute.v1.healthCheck - properties: - type: TCP - tcpHealthCheck: - port: ${ILB_PORT} diff --git a/dm/templates/ip_reservation/README.md b/dm/templates/ip_reservation/README.md deleted file mode 
100644 index 81357a7299d..00000000000 --- a/dm/templates/ip_reservation/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# IP Reservation - -This template creates an IP reservation. -Depending on the input option, the following addresses can be reserved: -- Global -- External -- Internal - -## Prerequisites -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.networkAdmin](https://cloud.google.com/compute/docs/access/iam) IAM role to the project service account (unless the default Project Editor role is already granted) - - -## Deployment - -### Resources - -- [gcp-types/compute-v1:address](https://cloud.google.com/compute/docs/reference/rest/v1/addresses) -- [gcp-types/compute-v1:globalAddress](https://cloud.google.com/compute/docs/reference/rest/v1/addresses) - - -### Properties - -See the `properties` section in the schema file(s): -- [IP Reservation](ip_reservation.py.schema) - - -#### Usage - -1. Clone the [Deployment Manager samples_repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../../cloud-foundation) directory - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment, in this case [examples/ip_reservation.yaml](examples/ip_reservation.yaml) - -```shell - cp templates/ip_reservation/examples/ip_reservation.yaml my_ip_reservation.yaml -``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - -```shell - vim my_ip_reservation.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment as described below, replacing - with your with your own deployment name - -```shell - gcloud deployment-manager deployments create \ - --config my_ip_reservation.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Reserving a global, external, or internal IP address](examples/ip_reservation.yaml) diff --git a/dm/templates/ip_reservation/examples/ip_reservation.yaml b/dm/templates/ip_reservation/examples/ip_reservation.yaml deleted file mode 100644 index 722501cbb31..00000000000 --- a/dm/templates/ip_reservation/examples/ip_reservation.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Example of the IP reservation template usage. -# -# In this example, an array of reserved IPs is applied to the PROJECTID project. -# -# : The project ID that created your network. -# : The network's region. -# : The subnetwork ID. -# - -imports: - - path: templates/ip_reservation/ip_reservation.py - name: ip_reservation.py - -resources: - - name: allmyips - type: ip_reservation.py - properties: - ipAddresses: - - name: myglobal - ipType: GLOBAL - description: 'my global ip' - - name: myregionalexternal - ipType: REGIONAL - region: - description: 'my static external ip' - - name: myinternal - ipType: INTERNAL - # This IP address must be within the subnet range. - address: 10.128.1.111 - subnetwork: projects//regions//subnetworks/ - region: - description: 'my internal ip' diff --git a/dm/templates/ip_reservation/ip_address.py b/dm/templates/ip_reservation/ip_address.py deleted file mode 100644 index 3d6cf5d60c4..00000000000 --- a/dm/templates/ip_reservation/ip_address.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" This template creates an IP address. """ - - -def get_address_type(ip_type): - """ Return the address type to reserve. """ - - if ip_type in ['GLOBAL', 'REGIONAL']: - return 'EXTERNAL' - - return 'INTERNAL' - -def get_resource_type(ip_type): - """ Return the address resource type. """ - - if ip_type == 'GLOBAL': - # https://cloud.google.com/compute/docs/reference/rest/v1/globalAddresses - return 'gcp-types/compute-v1:globalAddresses' - - # https://cloud.google.com/compute/docs/reference/rest/v1/addresses - return 'gcp-types/compute-v1:addresses' - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - resource_type = get_resource_type(context.properties['ipType']) - address_type = get_address_type(context.properties['ipType']) - name = context.properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - res_properties = { - 'addressType': address_type, - 'resourceType': 'addresses', - 'project': project_id, - } - - optional_properties = [ - 'subnetwork', - 'address', - 'description', - 'region', - 'networkTier', - 'prefixLength', - 'ipVersion', - 'purpose', - ] - - for prop in optional_properties: - if prop in context.properties: - res_properties[prop] = str(context.properties[prop]) - - resources = [ - { - 'name': name, - 'type': resource_type, - 'properties': res_properties - } - ] - - outputs = [ - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(name) - }, - { - 'name': 'address', - 'value': '$(ref.{}.address)'.format(name) - }, - { - 'name': 'status', - 'value': '$(ref.{}.status)'.format(name) - } - ] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/ip_reservation/ip_address.py.schema b/dm/templates/ip_reservation/ip_address.py.schema deleted file mode 100644 index 9c4eff20cef..00000000000 --- a/dm/templates/ip_reservation/ip_address.py.schema +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -info: - title: IP Address - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates an internal, external, or global IP address. - - For more information on this resource: - https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address - - APIs endpoints used by this template: - - gcp-types/compute-v1:globalAddresses => - https://cloud.google.com/compute/docs/reference/rest/v1/globalAddresses - - gcp-types/compute-v1:addresses => - https://cloud.google.com/compute/docs/reference/rest/v1/addresses - -additionalProperties: false - -required: - - name - - ipType - -allOf: - - anyOf: - - allOf: - - properties: - purpose: - enum: ["GCE_ENDPOINT", "DNS_RESOLVER"] - - required: - - purpose - - allOf: - - properties: - ipType: - enum: ["INTERNAL"] - - required: - - ipType - - not: - required: - - subnetwork - - anyOf: - - allOf: - - properties: - purpose: - enum: ["VPC_PEERING"] - - required: - - purpose - - not: - required: - - network - - anyOf: - - allOf: - - properties: - ipType: - enum: ["REGIONAL", "INTERNAL"] - - required: - - ipType - - not: - required: - - region - - anyOf: - - allOf: - - properties: - ipType: - enum: ["GLOBAL"] - - required: - - ipType - - not: - required: - - ipVersion - - -properties: - name: - type: string - description: | - Name of the reserved IP; unique within the context of the project. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the IP. The - Google apps domain is prefixed if applicable. - prefixLength: - type: number - description: | - The prefix length if the resource reprensents an IP range. - networkTier: - type: string - description: | - This signifies the networking tier used for configuring this address and can only take the following - values: PREMIUM or STANDARD. Global forwarding rules can only be Premium Tier. Regional forwarding rules - can be either Premium or Standard Tier. 
Standard Tier addresses applied to regional forwarding rules - can be used with any external load balancer. Regional forwarding rules in Premium Tier can only - be used with a network load balancer. - - If this field is not specified, it is assumed to be PREMIUM. - ipVersion: - type: string - description: | - The IP version that will be used by this address. Valid options are IPV4 or IPV6. - This can only be specified for a global address. - enum: - - IPV4 - - IPV6 - purpose: - type: string - description: | - The purpose of this resource, which can be one of the following values: - - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, internal load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork - VPC_PEERING for addresses that are reserved for VPC peer networks. - NAT_AUTO for addresses that are external IP addresses automatically reserved for Cloud NAT. - enum: - - GCE_ENDPOINT - - DNS_RESOLVER - - VPC_PEERING - - NAT_AUTO - ipType: - type: string - description: | - The IP types the user can reserve. - - GLOBAL - for global entities; the IPs can only be used with global - forwarding rules (GLB) - - REGIONAL - static external IPs that reside in a region - - INTERNAL - static internal (RFC1918) IPs that reside in a region on a - subnet - enum: - - GLOBAL - - REGIONAL - - INTERNAL - description: - type: string - description: | - An optional description of this resource. Provide this field when you create the resource. - address: - type: string - description: | - If the field value (IP address) is provided, Deployment - Manager tries to reserve the specified IP address. If the field is - not set, Deployment Manager reserves an internal IP address that - is part of the subnet definition. - network: - type: string - description: | - The URL of the network in which to reserve the address. - This field can only be used with INTERNAL type with the VPC_PEERING purpose. 
- subnetwork: - type: string - description: | - The URL of the subnetwork in which to reserve the address. If an IP address is specified, - it must be within the subnetwork's IP range. This field can only be used with INTERNAL type with - a GCE_ENDPOINT or DNS_RESOLVER purpose. - region: - type: string - description: | - The region where the regional address resides. - -outputs: - selfLink: - type: string - description: The URI (SelfLink) of the address resource. - address: - type: string - description: | - The static IP address represented by this resource. - status: - type: string - description: | - The status of the address, which can be one of RESERVING, - RESERVED, or IN_USE. An address that is RESERVING is - currently in the process of being reserved. A RESERVED - address is currently reserved and available to use. An IN_USE - address is currently being used by another resource and is - not available. - -documentation: - - templates/ip_reservation/README.md - -examples: - - templates/ip_reservation/examples/ip_reservation.yaml diff --git a/dm/templates/ip_reservation/ip_reservation.py b/dm/templates/ip_reservation/ip_reservation.py deleted file mode 100644 index 9e5e72d5ddd..00000000000 --- a/dm/templates/ip_reservation/ip_reservation.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" This template creates an IP reservation. 
""" - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - resources = [] - out = {} - - for ip_properties in context.properties['ipAddresses']: - name = ip_properties['name'] - resource = { - 'name': name, - 'type': 'ip_address.py', - 'properties': ip_properties - } - - resources.append(resource) - - out[name] = { - 'selfLink': '$(ref.' + name + '.selfLink)', - 'address': '$(ref.' + name + '.address)', - 'status': '$(ref.' + name + '.status)', - } - - outputs = [{'name': 'addresses', 'value': out}] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/ip_reservation/ip_reservation.py.schema b/dm/templates/ip_reservation/ip_reservation.py.schema deleted file mode 100644 index 97206d03e77..00000000000 --- a/dm/templates/ip_reservation/ip_reservation.py.schema +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: IP Reservation - author: Sourced Group Inc. - version: 1.0.0 - description: | - Reservers internal, external, or global IP address. 
- - For more information on this resource: - https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address - - APIs endpoints used by this template: - - gcp-types/compute-v1:globalAddresses => - https://cloud.google.com/compute/docs/reference/rest/v1/globalAddresses - - gcp-types/compute-v1:addresses => - https://cloud.google.com/compute/docs/reference/rest/v1/addresses - -imports: - - path: ../ip_reservation/ip_address.py - name: ip_address.py - -additionalProperties: false - -required: - - ipAddresses - -properties: - ipAddresses: - type: array - uniqueItems: true - description: | - An array of IPs to create as defined by the `ip_address.py` template. - Example: - - name: myregionalexternal - ipType: regional - region: - description: 'my static external ip' - items: - type: object - additionalProperties: false - properties: - name: - type: string - description: | - Name of the reserved IP; unique within the context of the project. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the IP. The - Google apps domain is prefixed if applicable. - prefixLength: - type: number - description: | - The prefix length if the resource reprensents an IP range. - networkTier: - type: string - description: | - This signifies the networking tier used for configuring this address and can only take the following - values: PREMIUM or STANDARD. Global forwarding rules can only be Premium Tier. Regional forwarding rules - can be either Premium or Standard Tier. Standard Tier addresses applied to regional forwarding rules - can be used with any external load balancer. Regional forwarding rules in Premium Tier can only - be used with a network load balancer. - - If this field is not specified, it is assumed to be PREMIUM. - ipVersion: - type: string - description: | - The IP version that will be used by this address. Valid options are IPV4 or IPV6. 
- This can only be specified for a global address. - enum: - - IPV4 - - IPV6 - purpose: - type: string - description: | - The purpose of this resource, which can be one of the following values: - - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, internal load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork - VPC_PEERING for addresses that are reserved for VPC peer networks. - NAT_AUTO for addresses that are external IP addresses automatically reserved for Cloud NAT. - enum: - - GCE_ENDPOINT - - DNS_RESOLVER - - VPC_PEERING - - NAT_AUTO - ipType: - type: string - description: | - The IP types the user can reserve. - - GLOBAL - for global entities; the IPs can only be used with global - forwarding rules (GLB) - - REGIONAL - static external IPs that reside in a region - - INTERNAL - static internal (RFC1918) IPs that reside in a region on a - subnet - enum: - - GLOBAL - - REGIONAL - - INTERNAL - description: - type: string - description: | - An optional description of this resource. Provide this field when you create the resource. - address: - type: string - description: | - If the field value (IP address) is provided, Deployment - Manager tries to reserve the specified IP address. If the field is - not set, Deployment Manager reserves an internal IP address that - is part of the subnet definition. - network: - type: string - description: | - The URL of the network in which to reserve the address. - This field can only be used with INTERNAL type with the VPC_PEERING purpose. - subnetwork: - type: string - description: | - The URL of the subnetwork in which to reserve the address. If an IP address is specified, - it must be within the subnetwork's IP range. This field can only be used with INTERNAL type with - a GCE_ENDPOINT or DNS_RESOLVER purpose. - region: - type: string - description: | - The region where the regional address resides. 
- -outputs: - addresses: - type: array - description: | - Array of address details. For example, the output can be referenced - as: `$(ref..addresses..selfLink)` - items: - description: The name of the address resource. - patternProperties: - ".*": - type: object - description: Details for an address resource. - properties: - selfLink: - type: string - description: The URI (SelfLink) of the address resource. - address: - type: string - description: | - The static IP address represented by this resource. - status: - type: string - description: | - The status of the address, which can be one of RESERVING, - RESERVED, or IN_USE. An address that is RESERVING is - currently in the process of being reserved. A RESERVED - address is currently reserved and available to use. An IN_USE - address is currently being used by another resource and is - not available. - -documentation: - - templates/ip_reservation/README.md - -examples: - - templates/ip_reservation/examples/ip_reservation.yaml diff --git a/dm/templates/ip_reservation/tests/integration/ip_reservation.bats b/dm/templates/ip_reservation/tests/integration/ip_reservation.bats deleted file mode 100755 index 3d9b602b145..00000000000 --- a/dm/templates/ip_reservation/tests/integration/ip_reservation.bats +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -## Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. 
-if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/ip_reservation/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - gcloud compute networks create network-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --description "integration test ${RAND}" \ - --subnet-mode custom - gcloud compute networks subnets create subnet-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network=network-${RAND} --region=us-central1 \ - --range=10.100.0.0/23 - create_config - fi -} - -function teardown() { - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud compute networks subnets delete subnet-${RAND} \ - --region=us-central1 --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - gcloud compute networks delete network-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - rm -f "${RANDOM_FILE}" - delete_config - fi -} - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that global IPs were created as part of deployment ${DEPLOYMENT_NAME}" { - run gcloud compute addresses describe test-myglobal-"${RAND}" --global \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "name: test-myglobal-${RAND}" ]] - [[ "$output" =~ "status: RESERVED" ]] - [[ "$output" =~ "description: my global ip" ]] -} - -@test "Verifying that internal IPs were created as part of deployment 
${DEPLOYMENT_NAME}" { - run gcloud compute addresses describe test-myinternal-"${RAND}" \ - --region us-central1 --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "name: test-myinternal-${RAND}" ]] - [[ "$output" =~ "status: RESERVED" ]] - [[ "$output" =~ "addressType: INTERNAL" ]] - [[ "$output" =~ "description: my us-central1 internal ip" ]] -} - -@test "Verifying that external static IPs are created as part of deployment ${DEPLOYMENT_NAME}" { - run gcloud compute addresses describe test-myregionalexternal-"${RAND}" \ - --region us-central1 --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "name: test-myregionalexternal-${RAND}" ]] - [[ "$output" =~ "status: RESERVED" ]] - [[ "$output" =~ "description: my us-central1 static external ip" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - run gcloud run gcloud compute addresses list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-myglobal-${RAND}" ]] - [[ ! "$output" =~ "test-myinternal-${RAND}" ]] - [[ ! "$output" =~ "test-myregionalexternal-${RAND}" ]] -} diff --git a/dm/templates/ip_reservation/tests/integration/ip_reservation.yaml b/dm/templates/ip_reservation/tests/integration/ip_reservation.yaml deleted file mode 100644 index 5a3b7386bf0..00000000000 --- a/dm/templates/ip_reservation/tests/integration/ip_reservation.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Test of the IP reservation template. -# -# Variables: -# RAND: A random string used by the testing suite. 
-# - -imports: -- path: templates/ip_reservation/ip_reservation.py - name: ip_reservation.py - -resources: -- name: allmyips-${RAND} - type: ip_reservation.py - properties: - ipAddresses: - - name: test-myglobal-${RAND} - ipType: GLOBAL - description: 'my global ip' - - name: test-myregionalexternal-${RAND} - ipType: REGIONAL - region: us-central1 - description: 'my us-central1 static external ip' - - name: test-myinternal-${RAND} - ipType: INTERNAL - address: 10.100.0.111 - subnetwork: projects/${CLOUD_FOUNDATION_PROJECT_ID}/regions/us-central1/subnetworks/subnet-${RAND} - region: us-central1 - description: 'my us-central1 internal ip' diff --git a/dm/templates/kms/README.md b/dm/templates/kms/README.md deleted file mode 100644 index 50fc8da57df..00000000000 --- a/dm/templates/kms/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Google Cloud Key Management Service (KMS) - -This template creates a Google Cloud KMS KeyRing and Keys. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [cloudkms.admin](https://cloud.google.com/kms/docs/iam) IAM role to - the Deployment Manager service account - -## Deployment - -### Resources - -- [gcp-types/cloudkms-v1](https://cloud.google.com/kms/docs/reference/rest/) -- [KMS Object heirarchy](https://cloud.google.com/kms/docs/object-hierarchy) -- [KMS Key Version States](https://cloud.google.com/kms/docs/key-states) -- [KMS Object Lifetime](https://cloud.google.com/kms/docs/object-hierarchy#lifetime) - -### Properties - -See the `properties` section in the schema file(s): - -- [Cloud KMS](kms.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - - ```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - ``` - -2. Go to the [dm](../../) directory - - ```shell - cd dm - ``` - -3. 
Copy the example DM config to be used as a model for the deployment, - in this case [examples/kms.yaml](examples/kms.yaml) - - ```shell - cp templates/kms/examples/kms.yaml my_kms.yaml - ``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - - ```shell - vim my_kms.yaml # <== Replace all placeholders in this file - ``` - -5. Create your deployment as described below, replacing - with your with your own deployment name - - ```shell - gcloud deployment-manager deployments create \ - --config my_kms.yaml - ``` - -> **Note**: Once created, this deployment cannot be deleted. -> Refer to `KMS Object Lifetime` in [Resources](#Resources) section - -## Examples - -- [KMS KeyRing with Encryption Keys](examples/kms.yaml) -- [KMS KeyRing with Signing Keys](examples/kms_signkey.yaml) diff --git a/dm/templates/kms/examples/kms.yaml b/dm/templates/kms/examples/kms.yaml deleted file mode 100644 index 9b4b643a7fa..00000000000 --- a/dm/templates/kms/examples/kms.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Example of the Google Cloud KMS template usage. -# -# In this example, a KMS KeyRing and cryptographic key are created. -# The cryptographic key's purpose is ENCRYPT_DECRYPT by default. -# IAM Policies are applied to the cryptographic key. 
-# -# Replace the following placeholders with relevant values: -# : a valid user account email address -# : Next Key Rotation time in format -# 2014-10-02T15:01:23.045123456Z - -imports: - - path: templates/kms/kms.py - name: kms.py - -resources: - - name: test-kms - type: kms.py - properties: - keyRingName: my-keyring-1 - region: global - keys: - - cryptoKeyName: my-crypto-key - nextRotationTime: \ No newline at end of file diff --git a/dm/templates/kms/examples/kms_signkey.yaml b/dm/templates/kms/examples/kms_signkey.yaml deleted file mode 100644 index 41b8d30b117..00000000000 --- a/dm/templates/kms/examples/kms_signkey.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Example of the Google Cloud KMS template usage. -# -# In this example, a KMS KeyRing is created with a cryptographic signing key -# The key uses the RSA_SIGN_PKCS1_2048_SHA256 algorithm. - -imports: - - path: templates/kms/kms.py - name: kms.py - -resources: - - name: test-kms - type: kms.py - properties: - keyRingName: my-keyring-2 - region: us-east1 - keys: - - cryptoKeyName: my-sign-key - cryptoKeyPurpose: ASYMMETRIC_SIGN - versionTemplate: - protectionLevel: SOFTWARE - algorithm: RSA_SIGN_PKCS1_2048_SHA256 diff --git a/dm/templates/kms/kms.py b/dm/templates/kms/kms.py deleted file mode 100644 index 29da0cb2ccb..00000000000 --- a/dm/templates/kms/kms.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" Creates a Cloud KMS KeyRing and cryptographic key resources. """ - - -def generate_config(context): - """ - Entry point for the deployment resources - """ - - resources = [] - properties = context.properties - project_id = properties.get('project', context.env['project']) - parent = 'projects/{}/locations/{}'.format( - project_id, - properties.get('region') - ) - keyring_name = properties.get('keyRingName', context.env['name']) - keyring_id = '{}/keyRings/{}'.format(parent, keyring_name) - # https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings - provider = 'gcp-types/cloudkms-v1:projects.locations.keyRings' - # keyring resource - keyring = { - 'name': context.env['name'], - 'type': provider, - 'properties': { - 'parent': parent, - 'keyRingId': keyring_name - } - } - resources.append(keyring) - - # cryptographic key resources - for key in properties.get('keys', []): - key_name = key['cryptoKeyName'].lower() - key_resource = '{}-{}'.format(context.env['name'], key_name) - crypto_key = { - 'name': key_resource, - 'type': provider + '.cryptoKeys', - 'properties': - { - 'parent': keyring_id, - 'cryptoKeyId': key_name, - 'purpose': key.get('cryptoKeyPurpose'), - 'labels': key.get('labels', - {}) - }, - 'metadata': { - 'dependsOn': [context.env['name']] - } - } - - # crypto key optional properties - for prop in ['versionTemplate', 'nextRotationTime', 'rotationPeriod']: - if prop in key: - crypto_key['properties'][prop] = key.get(prop) - resources.append(crypto_key) - - - return { - 'resources': - resources, - 'outputs': - [ - { - 'name': 'keyRing', - 'value': '$(ref.{}.name)'.format(context.env['name']) - } - ] - } diff --git a/dm/templates/kms/kms.py.schema b/dm/templates/kms/kms.py.schema deleted file mode 100644 index ff7e4bae89f..00000000000 --- a/dm/templates/kms/kms.py.schema +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Google Cloud KMS KeyRing and Keys - version: 1.1.0 - author: Sourced Group Inc. - description: | - Creates a Cloud KMS KeyRing and cryptographic keys. - - For more information on this resource: - https://cloud.google.com/kms/docs/reference/rest. - - APIs endpoints used by this template: - - gcp-types/cloudkms-v1:projects.locations.keyRings => - https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings - - gcp-types/cloudkms-v1:cloudkms.projects.locations => - https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings - -additionalProperties: false - -required: - - keyRingName - -properties: - keyRingName: - type: string - pattern: ^[a-zA-Z0-9_-]{1,63} - description: | - The name for the KeyRing. Must be unique within a location. - project: - type: string - description: | - The project ID of the project containing the keyring. - region: - type: string - default: global - description: The KeyRing location. - keys: - type: array - uniqueItems: True - items: - type: object - additionalProperties: false - description: The CryptoKey object. - required: - - cryptoKeyName - properties: - cryptoKeyName: - type: string - pattern: ^[a-zA-Z0-9_-]{1,63} - description: | - The name for the CrytoKey. Must be unique within a KeyRing. - cryptoKeyPurpose: - type: string - default: ENCRYPT_DECRYPT - description: | - The immutable purpose of the CryptoKey. 
Describes the - cryptographic capabilities of the CryptoKey. - enum: - - CRYPTO_KEY_PURPOSE_UNSPECIFIED - - ENCRYPT_DECRYPT - - ASYMMETRIC_SIGN - - ASYMMETRIC_DECRYPT - nextRotationTime: - type: string - description: | - The time when the Key Management Service will automatically - create a new version of the CryptoKey and mark the new version - as primary. Keys with the ENCRYPT_DECRYPT purpose support - automatic rotation. For all other keys, this field must be left - blank. The timestamp is in the RFC3339 UTC "Zulu" format, - accurate to nanoseconds; e.g., "2014-10-02T15:01:23.045123456Z". - rotationPeriod: - type: string - description: | - nextRotationTime will be advanced by this period when the service - automatically rotates a key. Must be at least one day. - If rotationPeriod is set, nextRotationTime must also be set. - Keys with purpose ENCRYPT_DECRYPT support automatic rotation. - For other keys, this field must be omitted. A duration in seconds - with up to nine fractional digits, terminated by 's'. Example '3.5s' - versionTemplate: - type: object - additionalProperties: false - description: | - The template that controls properties of new CryptoKeyVersion - instances created by either cryptoKeyVersions.create or - auto-rotation. - properties: - protectionLevel: - type: string - default: SOFTWARE - description: | - The level of protection to use when creating a CryptoKeyVersion - based on the template. - enum: - - PROTECTION_LEVEL_UNSPECIFIED - - SOFTWARE - - HSM - algorithm: - type: string - default: GOOGLE_SYMMETRIC_ENCRYPTION - description: | - The CryptoKeyVersion algorithm, defines the parameters - to be used for each cryptographic operation. 
- enum: - - CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED - - GOOGLE_SYMMETRIC_ENCRYPTION - - RSA_SIGN_PSS_2048_SHA256 - - RSA_SIGN_PSS_3072_SHA256 - - RSA_SIGN_PSS_4096_SHA256 - - RSA_SIGN_PSS_4096_SHA512 - - RSA_SIGN_PKCS1_2048_SHA256 - - RSA_SIGN_PKCS1_3072_SHA256 - - RSA_SIGN_PKCS1_4096_SHA256 - - RSA_SIGN_PKCS1_4096_SHA512 - - RSA_DECRYPT_OAEP_2048_SHA256 - - RSA_DECRYPT_OAEP_3072_SHA256 - - RSA_DECRYPT_OAEP_4096_SHA256 - - RSA_DECRYPT_OAEP_4096_SHA512 - - EC_SIGN_P256_SHA256 - - EC_SIGN_P384_SHA384 - labels: - type: object - description: | - Labels with user-defined metadata. For more info, see - https://cloud.google.com/kms/docs/labeling-keys. - -outputs: - keyRing: - type: string - description: Path to the KeyRing resource. - -documentation: - - templates/kms/README.md - -examples: - - templates/kms/examples/kms.yaml - - templates/kms/examples/kms_signkey.yaml diff --git a/dm/templates/kms/tests/integration/kms.bats b/dm/templates/kms/tests/integration/kms.bats deleted file mode 100644 index 48a7961e677..00000000000 --- a/dm/templates/kms/tests/integration/kms.bats +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # test specific variables - export KEYRING_NAME="test-keyring-${RAND}" - export REGION="global" - export KEY_NAME="test-key-${RAND}" - export SA_NAME="test-kms-${RAND}" - export SA_FQDN="${SA_NAME}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" - export ROLE="roles/cloudkms.admin" - export KEY_PURPOSE="ENCRYPT_DECRYPT" - # export NEXT_ROTATION_TIME=$(date -d '2 months' '+%Y-%m-%dT%H:%M:%S.%NZ') -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - # Create service accounts to test IAM bindings. - gcloud iam service-accounts create "${SA_NAME}" \ - --display-name "Test KMS Service Account" - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - # Delete service account after tests had been completed. - gcloud --quiet iam service-accounts delete "${SA_FQDN}" - fi - - # Per-test teardown steps. 
-} - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -sleep 5 - -@test "Verifying the KeyRing ${KEYRING_NAME} was created " { - run gcloud kms keyrings list --location ${REGION} \ - --format="value(name.scope(keyRings))" \ - --filter="${KEYRING_NAME}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${KEYRING_NAME}" ]] -} - -@test "KEY ${KEY_NAME} is created in KeyRing ${KEYRING_NAME} " { - run gcloud kms keys list --location ${REGION} --keyring="${KEYRING_NAME}" \ - --format="value(name.scope(cryptoKeys))" \ - --filter="${KEY_NAME}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${KEY_NAME}" ]] -} - -@test "CryptoKey's PURPOSE is set to ${KEY_PURPOSE} " { - run gcloud kms keys describe ${KEY_NAME} --location ${REGION} \ - --keyring="${KEYRING_NAME}" \ - --format="value(purpose)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${KEY_PURPOSE}" ]] -} - -@test "protectionLevel on key ${KEY_NAME} = SOFTWARE " { - run gcloud kms keys describe ${KEY_NAME} --location ${REGION} \ - --keyring="${KEYRING_NAME}" \ - --format="value(versionTemplate.protectionLevel)" - [[ "$status" -eq 0 ]] - [[ "$output" -eq "SOFTWARE" ]] -} - -@test "Enc algorithm on key ${KEY_NAME} = GOOGLE_SYMMETRIC_ENCRYPTION" { - run gcloud kms keys describe ${KEY_NAME} --location ${REGION} \ - --keyring="${KEYRING_NAME}" \ - --format="value(versionTemplate.algorithm)" - [[ "$status" -eq 0 ]] - [[ "$output" -eq "GOOGLE_SYMMETRIC_ENCRYPTION" ]] -} - -@test "Verify whether ${SA_NAME} has role ${ROLE} " { - run gcloud kms keys get-iam-policy ${KEY_NAME} --location ${REGION} \ - --keyring="${KEYRING_NAME}" \ - --format="value(bindings.role)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${ROLE}" ]] -} - -@test "Verify if ${SA_NAME} has access to ${KEY_NAME} " { - run gcloud kms keys get-iam-policy ${KEY_NAME} 
--location ${REGION} \ - --keyring="${KEYRING_NAME}" \ - --format="value(bindings.members[0])" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${SA_FQDN}" ]] -} - -########### NOTE ################## -# There is no Delete Deployment step because KeyRings, Keys cannot be deleted. -################################## diff --git a/dm/templates/kms/tests/integration/kms.yaml b/dm/templates/kms/tests/integration/kms.yaml deleted file mode 100644 index 1d5d2b46de5..00000000000 --- a/dm/templates/kms/tests/integration/kms.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Test of the Google Cloud KMS KeyRing template. - -imports: - - path: templates/kms/kms.py - name: kms.py - -resources: - - name: test-kms - type: kms.py - properties: - keyRingName: ${KEYRING_NAME} - region: ${REGION} - keys: - - cryptoKeyName: ${KEY_NAME} - cryptoKeyPurpose: ${KEY_PURPOSE} \ No newline at end of file diff --git a/dm/templates/logsink/README.md b/dm/templates/logsink/README.md deleted file mode 100644 index b475e34e765..00000000000 --- a/dm/templates/logsink/README.md +++ /dev/null @@ -1,115 +0,0 @@ -# Logsink - -This template creates a logsink (logging sink). The logsink destination can -exist prior to creating the logsink or can be created by the logsink template. -If the resources are created by the logsink, the logsink uniqueWriter service -account will be granted the appropriate permissions to the destination -resource. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create one of the following: - - [GCS bucket](https://cloud.google.com/storage/docs/json_api/v1/buckets) - - [PubSub topic](https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics) - - [BigQuery dataset](https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets) -- Grant the [logging.configWriter or logging.admin](https://cloud.google.com/logging/docs/access-control) - IAM role to the project service account -- Grant the [`pubsub.admin`](https://cloud.google.com/pubsub/docs/access-control) - IAM role to the project service account if creating a pubsub logging sink - destination -- Grant the [`storage.admin`](https://cloud.google.com/storage/docs/access-control/iam-roles) - IAM role to the project service account if creating a bucket logging sink - destination -- Grant the [`bigquery.admin`](https://cloud.google.com/bigquery/docs/access-control) - IAM role to the project service account if creating bq logging sink - destination - -#### If you are going to create bucket, pubsub or BigQuery destinations in current project: - -- Grant the [resourcemanager.projectIamAdmin or owner](https://cloud.google.com/iam/docs/understanding-roles) IAM role on the project to the *DM Service Account* to grant roles within the project -- Grant the [roles/resourcemanager.folderIamAdmin owner](https://cloud.google.com/iam/docs/understanding-roles) IAM role on the folder to the *DM Service Account* to grant roles within the folder -- Grant the [roles/iam.securityAdmin or owner](https://cloud.google.com/iam/docs/understanding-roles) IAM role on the organization to the *DM Service Account* to grant roles within the organization and all nested resources -- Grant the [logging.configWriter or logging.admin](https://cloud.google.com/logging/docs/access-control) IAM role on the project to the *DM Service Account* 
to grant roles within the project - -## If you specify destination project and are going to create bucket, pubsub or BigQuery destinations: - -- Grant the [resourcemanager.projectIamAdmin or owner](https://cloud.google.com/iam/docs/understanding-roles) IAM role on the project to the *DM Service Account* to grant roles within the project -- Grant the [roles/resourcemanager.folderIamAdmin owner](https://cloud.google.com/iam/docs/understanding-roles) IAM role on the folder to the *DM Service Account* to grant roles within the folder -- Grant the [roles/iam.securityAdmin or owner](https://cloud.google.com/iam/docs/understanding-roles) IAM role on the organization to the *DM Service Account* to grant roles within the organization and all nested resources -- Grant the [logging.configWriter or logging.admin](https://cloud.google.com/logging/docs/access-control) IAM role on the project to the *DM Service Account* to grant roles within the project - IAM role to the project service account -- Grant the [`pubsub.admin`](https://cloud.google.com/pubsub/docs/access-control) - IAM role to the project service account if creating a pubsub logging sink - destination -- Grant the [`storage.admin`](https://cloud.google.com/storage/docs/access-control/iam-roles) - IAM role to the project service account if creating a bucket logging sink - destination -- Grant the [`bigquery.admin`](https://cloud.google.com/bigquery/docs/access-control) - IAM role to the project service account if creating bq logging sink - destination - -## Deployment - -### Resources - -- [logging.v2.sink](https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks) -- [pubsub.v1.topic](https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics) -- [storage.v1.bucket](https://cloud.google.com/storage/docs/creating-buckets) -- [bigquery.v2.dataset](https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets) - -### Properties - -See `properties` section in the schema file(s): - -- 
[Logsink](logsink.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this - case, [examples/logsink.yaml](examples/logsink.yaml): - -```shell - cp templates/logsink/examples/logsink.yaml my_logsink.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for - properties, refer to the schema files listed above): - -```shell - vim my_logsink.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace \ with the relevant - deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_logsink.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Organization logging entries exported to PubSub](examples/org_logsink_pubsub_destination.yaml) -- [Billing account logging entries exported to Storage](examples/billingaccount_logsink_bucket_destination.yaml) -- [Folder logging entries exported to BigQuery](examples/folder_logsink_bq_destination.yaml) -- [Project logging entries exported to Storage](examples/project_logsink_bucket_destination.yaml) diff --git a/dm/templates/logsink/examples/billingaccount_logsink_bucket_destination.yaml b/dm/templates/logsink/examples/billingaccount_logsink_bucket_destination.yaml deleted file mode 100644 index faa23b66a91..00000000000 --- a/dm/templates/logsink/examples/billingaccount_logsink_bucket_destination.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Example of a billing account logsink with a bucket destination template -# usage. -# -# In this example, two billing account log sinks are created. 
One with a -# bucket destination name, another that also creates the bucket. -# -# Replace the following with appropirate values -# : The billing account to create the logsink for. -# : The name of a GCS bucket that exists. -# : The name of the GCS bucket to create. -# : A valid user account to give permissions -# to the bucket. -# - -imports: - - path: templates/logsink/logsink.py - name: logsink.py - -resources: - # Billing account sink with a bucket destination - - name: test-billing-logsink-bucket - type: logsink.py - properties: - billingAccountId: - destinationName: - destinationType: storage - uniqueWriterIdentity: true - - # Billing account sink with a bucket destination that is created - - name: test-billing-logsink-create-bucket - type: logsink.py - properties: - billingAccountId: - destinationName: - destinationType: storage - uniqueWriterIdentity: true - storageProperties: - location: us-east1 - bindings: - - role: roles/storage.objectViewer - members: - - user: diff --git a/dm/templates/logsink/examples/folder_logsink_bq_destination.yaml b/dm/templates/logsink/examples/folder_logsink_bq_destination.yaml deleted file mode 100644 index 5878d876641..00000000000 --- a/dm/templates/logsink/examples/folder_logsink_bq_destination.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Example of a folder logsink with a BigQuery dataset destination template -# usage. -# -# In this example, a folder log sink is created with a BigQuery dataset -# destination name. -# -# Replace the following with appropirate values -# : The folder ID to create the losink for. -# : The name of a BigQuery dataset that exists. -# - -imports: - - path: templates/logsink/logsink.py - name: logsink.py - -resources: - # Folder sink with a BigQuery dataset destination. 
- - name: test-folder-logsink-bq - type: logsink.py - properties: - folderId: - destinationName: - destinationType: bigquery - uniqueWriterIdentity: true diff --git a/dm/templates/logsink/examples/logsink_new_bq_destination_in_external_project.yaml b/dm/templates/logsink/examples/logsink_new_bq_destination_in_external_project.yaml deleted file mode 100644 index a125bcbc3fc..00000000000 --- a/dm/templates/logsink/examples/logsink_new_bq_destination_in_external_project.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Example of a project logsink with a big query destination template usage, where BigQuery dataset will be created in another project. -# -# In this example, single log sink project is created with a BigQuery -# destination name and destination project -# -# Replace the following with appropirate values -# : The project id to create the logsink for. -# : The project id to create target BigQuery dataset for. -# : The name of a BigQuery dataset that does not exist. -# - -imports: - - path: templates/logsink/logsink.py - name: logsink.py - -resources: - - name: test-logsink-bq - type: logsink.py - properties: - projectId: - destinationName: - destinationType: bigquery - uniqueWriterIdentity: true - destinationProject: - bqProperties: - location: US diff --git a/dm/templates/logsink/examples/logsink_new_bucket_destination_in_external_project.yaml b/dm/templates/logsink/examples/logsink_new_bucket_destination_in_external_project.yaml deleted file mode 100644 index 859147cfbfb..00000000000 --- a/dm/templates/logsink/examples/logsink_new_bucket_destination_in_external_project.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Example of a project logsink with a bucket destination template usage, where bucket will be created in another project. -# -# In this example, single log sink project is created with a bucket -# destination name and destination project -# -# Replace the following with appropirate values -# : The project id to create the logsink for. 
-# : The project id to create the target bucket for. -# : The name of a GCS bucket that does not exist. -# - -imports: - - path: templates/logsink/logsink.py - name: logsink.py - -resources: - - name: log-sink - type: logsink.py - properties: - projectId: - destinationName: - destinationType: storage - destinationProject: - uniqueWriterIdentity: true - storageProperties: - location: us-east1 diff --git a/dm/templates/logsink/examples/logsink_new_pubsub_destination_in_external_project.yaml b/dm/templates/logsink/examples/logsink_new_pubsub_destination_in_external_project.yaml deleted file mode 100644 index b14e8c066e7..00000000000 --- a/dm/templates/logsink/examples/logsink_new_pubsub_destination_in_external_project.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Example of a project logsink with a pubsub destination template usage. -# -# In this example, single log sink project is created with a pubsub -# destination name and target project destination name -# -# Replace the following with appropirate values -# : The project id to create the logsink for. -# : The project id to create the target pubsub topic for. -# : The name of a pubsub topic that does not exists. -# - -imports: - - path: templates/logsink/logsink.py - name: logsink.py - -resources: - - name: test-org-logsink-create-pubsub - type: logsink.py - properties: - projectId: - destinationName: - destinationType: pubsub - uniqueWriterIdentity: true - destinationProject: - pubsubProperties: - topic: diff --git a/dm/templates/logsink/examples/org_logsink_pubsub_destination.yaml b/dm/templates/logsink/examples/org_logsink_pubsub_destination.yaml deleted file mode 100644 index b5980248579..00000000000 --- a/dm/templates/logsink/examples/org_logsink_pubsub_destination.yaml +++ /dev/null @@ -1,50 +0,0 @@ -# Example of a organization logsink with a pubsub destination template usage. -# -# In this example, two organization log sinks are created. One with a pubsub -# destination name, another that also creates the pubsub. 
-# -# Replace the following with appropirate values -# : The organization ID to create the logsink for. -# : The name of a pubsub topic that exists. -# : The pubsub topic name. -# : A pubsub topic name. -# : A valid user account to give permissions -# to the pubsub topic. -# - -imports: - - path: templates/logsink/logsink.py - name: logsink.py - -resources: - # Organization sink with a PubSub topic destination - - name: test-org-logsink-pubsub - type: logsink.py - properties: - orgId: - # When using a PubSub topic, the value must be the topic ID. The ID must - # contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). - # The maximum length is 1,024 characters. - destinationName: - destinationType: pubsub - uniqueWriterIdentity: true - - # Organization sink with a pubsub destination that is created. - - name: test-org-logsink-create-pubsub - type: logsink.py - properties: - orgId: - # When using a PubSub topic, the value must be the topic ID. The ID must - # contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). - # The maximum length is 1,024 characters. - destinationName: - destinationType: pubsub - uniqueWriterIdentity: true - # Properties for the pubsub destination to be created. - # Refer to templates/pubsub/pubsub.py.schema for supported properties. - pubsubProperties: - topic: - accessControl: - - role: roles/pubsub.admin - members: - - user: diff --git a/dm/templates/logsink/examples/project_logsink_bucket_destination.yaml b/dm/templates/logsink/examples/project_logsink_bucket_destination.yaml deleted file mode 100644 index 05f96127c26..00000000000 --- a/dm/templates/logsink/examples/project_logsink_bucket_destination.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Example of a project logsink with a bucket destination template usage. -# -# In this example, two project log sinks are created. One with a bucket -# destination name, another that also creates the bucket. 
-# -# Replace the following with appropirate values -# : The project id to create the logsink for. -# : The name of a GCS bucket that exists. -# : The name of the GCS bucket to create. -# : A valid user account to give permissions -# to the bucket. -# - -imports: - - path: templates/logsink/logsink.py - name: logsink.py - -resources: - # Project sink with a bucket destination - - name: test-project-logsink-bucket - type: logsink.py - properties: - projectId: - destinationName: - destinationType: storage - uniqueWriterIdentity: true - - # Project sink with a bucket destination that is created - - name: test-project-logsink-create-bucket - type: logsink.py - properties: - projectId: - destinationName: - destinationType: storage - uniqueWriterIdentity: true - storageProperties: - location: us-east1 - bindings: - - role: roles/storage.objectViewer - members: - - user: diff --git a/dm/templates/logsink/logsink.py b/dm/templates/logsink/logsink.py deleted file mode 100644 index a2f31718f4d..00000000000 --- a/dm/templates/logsink/logsink.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a logsink (logging sink). """ - - -def create_pubsub(context, logsink_name): - """ Create the pubsub destination. 
""" - - properties = context.properties - project_id = properties.get('destinationProject', properties.get('project', context.env['project'])) - - dest_properties = [] - if 'pubsubProperties' in context.properties: - dest_prop = context.properties['pubsubProperties'] - dest_prop['name'] = context.properties['destinationName'] - dest_prop['project'] = project_id - access_control = dest_prop.get('accessControl', []) - access_control.append( - { - 'role': 'roles/pubsub.admin', - 'members': ['$(ref.' + logsink_name + '.writerIdentity)'] - } - ) - - dest_prop['accessControl'] = access_control - dest_properties = [ - { - 'name': '{}-pubsub'.format(context.env['name']), - 'type': 'pubsub.py', - 'properties': dest_prop - }, - { - 'name': '{}-iam-member-pub-sub-policy'.format(context.env['name']), - 'type': 'iam_member.py', - 'properties': - { - 'projectId': project_id, - 'dependsOn': [logsink_name], - 'roles': [{ - 'role': 'roles/pubsub.admin', - 'members': ['$(ref.{}.writerIdentity)'.format(logsink_name)] - }] - } - } - ] - - return dest_properties - - -def create_bq_dataset(context, logsink_name): - """ Create the BQ dataset destination. """ - - properties = context.properties - project_id = properties.get('destinationProject', properties.get('project', context.env['project'])) - - dest_properties = [] - if 'bqProperties' in context.properties: - dest_prop = context.properties['bqProperties'] - dest_prop['name'] = context.properties['destinationName'] - dest_prop['project'] = project_id - - ## NOTE: There is a issue where BQ does not accept the uniqueWriter - ## returned by the logsink to be used in the userByEmail property. - ## Until that is resolved, this property is not supported. - # access = dest_prop.get('access', []) - # access.append( - # { - # 'role': 'roles/bigquery.admin', - # 'userByEmail': '$(ref.' 
+ logsink_name + '.writerIdentity)' - # } - # ) - # - # dest_prop['access'] = access - - dest_properties = [ - { - 'name': '{}-bigquery-dataset'.format(context.env['name']), - 'type': 'bigquery_dataset.py', - 'properties': dest_prop - }, - { - 'name': '{}-iam-member-bigquery-policy'.format(context.env['name']), - 'type': 'iam_member.py', - 'properties': - { - 'projectId': project_id, - 'dependsOn': [logsink_name], - 'roles': [{ - 'role': 'roles/bigquery.admin', - 'members': ['$(ref.{}.writerIdentity)'.format(logsink_name)] - }] - } - } - ] - - return dest_properties - - -def create_storage(context, logsink_name): - """ Create the bucket destination. """ - - properties = context.properties - project_id = properties.get('destinationProject', properties.get('project', context.env['project'])) - - dest_properties = [] - if 'storageProperties' in context.properties: - bucket_name = context.properties['destinationName'] - dest_prop = context.properties['storageProperties'] - dest_prop['name'] = bucket_name - dest_prop['project'] = project_id - bindings = dest_prop.get('bindings', []) - bindings.append({ - 'role': 'roles/storage.admin', - 'members': ['$(ref.{}.writerIdentity)'.format(logsink_name)] - }) - - # Do not set any IAM during the creation of the bucket since - # we are going to set it afterwards - if 'bindings' in dest_prop: - del dest_prop['bindings'] - - dest_properties = [ - { - # Create the GCS Bucket - 'name': bucket_name, - 'type': 'gcs_bucket.py', - 'properties': dest_prop - }, - { - # Give the logsink writerIdentity permissions to the bucket - 'name': '{}-iam-member-bucket-policy'.format(bucket_name), - 'type': 'iam_member.py', - 'properties': - { - 'bucket': bucket_name, - 'dependsOn': [logsink_name], - 'roles': bindings - } - } - ] - - return dest_properties - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - properties = { - 'name': name, - 'uniqueWriterIdentity': context.properties['uniqueWriterIdentity'], - 'sink': name, - } - - if 'orgId' in context.properties: - source_id = str(context.properties.get('orgId')) - source_type = 'organizations' - properties['organization'] = str(source_id) - elif 'billingAccountId' in context.properties: - source_id = context.properties.get('billingAccountId') - source_type = 'billingAccounts' - del properties['sink'] - elif 'folderId' in context.properties: - source_id = str(context.properties.get('folderId')) - source_type = 'folders' - properties['folder'] = str(source_id) - elif 'projectId' in context.properties: - source_id = context.properties.get('projectId') - source_type = 'projects' - - properties['parent'] = '{}/{}'.format(source_type, source_id) - - dest_properties = [] - if context.properties['destinationType'] == 'pubsub': - dest_properties = create_pubsub(context, name) - destination = 'pubsub.googleapis.com/projects/{}/topics/{}'.format( - project_id, - context.properties['destinationName'] - ) - elif context.properties['destinationType'] == 'storage': - dest_properties = create_storage(context, name) - destination = 'storage.googleapis.com/{}'.format( - context.properties['destinationName'] - ) - elif context.properties['destinationType'] == 'bigquery': - dest_properties = create_bq_dataset(context, name) - destination = 'bigquery.googleapis.com/projects/{}/datasets/{}'.format( - project_id, - context.properties['destinationName'] - ) - - properties['destination'] = destination - - sink_filter = context.properties.get('filter') - if sink_filter: - properties['filter'] = sink_filter - - # https://cloud.google.com/logging/docs/reference/v2/rest/v2/folders.sinks - # https://cloud.google.com/logging/docs/reference/v2/rest/v2/billingAccounts.sinks - # 
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks - # https://cloud.google.com/logging/docs/reference/v2/rest/v2/organizations.sinks - base_type = 'gcp-types/logging-v2:' - resource = { - 'name': context.env['name'], - 'type': base_type + source_type + '.sinks', - 'properties': properties - } - resources = [resource] - - if dest_properties: - resources.extend(dest_properties) - if context.properties['destinationType'] == 'storage': - # GCS Bucket needs to be created first before the sink whereas - # pub/sub and BQ do not. This might change in the future. - resource['metadata'] = { - 'dependsOn': [dest_properties[0]['name']] - } - - return { - 'resources': - resources, - 'outputs': - [ - { - 'name': 'writerIdentity', - 'value': '$(ref.{}.writerIdentity)'.format(context.env['name']) - } - ] - } diff --git a/dm/templates/logsink/logsink.py.schema b/dm/templates/logsink/logsink.py.schema deleted file mode 100644 index 1b161c4f1be..00000000000 --- a/dm/templates/logsink/logsink.py.schema +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Logging Sink - author: Sourced Group Inc. - version: 1.1.0 - description: | - Creates a logging sink to export entries to a desired destination. 
- - For more information on this resource: - - https://cloud.google.com/logging/docs/reference/v2/rest/ - - APIs endpoints used by this template: - - gcp-types/storage-v1:storage.buckets.setIamPolicy => - https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy - - gcp-types/logging-v2:folders.sinks => - https://cloud.google.com/logging/docs/reference/v2/rest/v2/folders.sinks - - gcp-types/logging-v2:billingAccounts.sinks => - https://cloud.google.com/logging/docs/reference/v2/rest/v2/billingAccounts.sinks - - gcp-types/logging-v2:projects.sinks => - https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks - - gcp-types/logging-v2:organizations.sinks => - https://cloud.google.com/logging/docs/reference/v2/rest/v2/organizations.sinks - -imports: - - path: ../pubsub/pubsub.py - name: pubsub.py - - path: ../bigquery/bigquery_dataset.py - name: bigquery_dataset.py - - path: ../gcs_bucket/gcs_bucket.py - name: gcs_bucket.py - - path: ../iam_member/iam_member.py - name: iam_member.py - -additionalProperties: false - -required: - - destinationType - - destinationName - -oneOf: - - required: - - projectId - - required: - - orgId - - required: - - billingAccountId - - required: - - folderId - -properties: - name: - type: string - description: | - Name of the sink. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing resources. The - Google apps domain is prefixed if applicable. - destinationName: - type: string - description: | - An identifier of the destination resource, such as: a name for a Bucket, - a topic ID for a PubSub, or a dataset ID for a BigQuery. The IDs must - contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). - The maximum ID length is 1,024 characters. - destinationType: - type: string - description: The type of the destination resource. 
- enum: - - pubsub - - storage - - bigquery - uniqueWriterIdentity: - type: boolean - description: | - Determines the kind of the IAM identity returned as writerIdentity for the - new sink. If "true", the sink is owned by a non-project resource such - as an organization. In this case, the writerIdentity value is a unique - serivce account used only for exports from the new sink. - default: true - filter: - type: string - description: | - An advanced log filter that limits the export to those log entries that: - (a) are in the resource that owns the sink; and - (b) matches the filter condition(s). - destinationProject: - type: string - description: | - Bucket project name. - pubsubProperties: - type: object - description: | - Pubsub properties as defined in the `templates/pubsub/pubsub.py.schema`. - Example: - - name: test-logsink-project-pubsub - type: logsink.py - properties: - destinationName: test-logsink-project-pubsub-dest - destinationType: pubsub - projectId: 1234567890 - uniqueWriterIdentity: true - pubsubProperties: - topic: test-logsink-project-pubsub-topic-dest - accessControl: - - role: roles/pubsub.admin - members: - - user:my-email@email.com - storageProperties: - type: object - description: | - Bucket properties as defined in the - `templates/gcs_bucket/gcs_bucket.py.schema`. - Example: - - name: test-logsink-project-storage-create - type: logsink.py - properties: - destinationName: test-logsink-project-storage-dest - destinationType: storage - projectId: 1234567890 - uniqueWriterIdentity: true - storageProperties: - location: us-east1 - bindings: - - role: roles/storage.objectViewer - members: - - user:my-email@email.com - bqProperties: - type: object - description: | - NOTE: There is a issue where BQ does not accept the uniqueWriter - returned by the logsink to be used in the userByEmail property. - Until that is resolved, this property is not supported. 
- BigQuery dataset properties as defined in the - `templates/bigquery/bigquery_dataset.py.schema`. - Example: - - name: test-logsink-project-bq-create - type: logsink.py - properties: - # BQ names use underscores - destinationName: test_logsink_project_bq_dest - destinationType: bigquery - projectId: 1234567890 - uniqueWriterIdentity: true - bqProperties: - location: US - access: - - role: OWNER - userByEmail: my-email@email.com - projectId: - type: - - string - - number - description: | - Project ID to add sink to - orgId: - type: - - string - - number - description: | - Org ID to add sink to - billingAccountId: - type: - - string - - number - description: | - Billing account ID to add sink to - folderId: - type: - - string - - number - description: | - Folder ID to add sink to - -documentation: - - templates/logsink/README.md - -examples: - - templates/logsink/examples/org_logsink_pubsub_destination.yaml - - templates/logsink/examples/billingaccount_logsink_bucket_destination.yaml - - templates/logsink/examples/folder_logsink_bq_destination.yaml - - templates/logsink/examples/project_logsink_bucket_destination.yaml diff --git a/dm/templates/logsink/tests/integration/logsink.bats b/dm/templates/logsink/tests/integration/logsink.bats deleted file mode 100644 index 0a4c1bc5f4b..00000000000 --- a/dm/templates/logsink/tests/integration/logsink.bats +++ /dev/null @@ -1,333 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. 
-if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function get_test_folder_id() { - # Get the test folder ID and make it available - TEST_ORG_FOLDER_NAME=$(gcloud alpha resource-manager folders list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --organization "${CLOUD_FOUNDATION_ORGANIZATION_ID}" | \ - grep "test-org-folder-${RAND}") - - export TEST_ORG_FOLDER_NAME=`echo ${TEST_ORG_FOLDER_NAME} | cut -d ' ' -f 3` -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - gcloud alpha resource-manager folders create \ - --display-name="test-org-folder-${RAND}" \ - --organization="${CLOUD_FOUNDATION_ORGANIZATION_ID}" - get_test_folder_id - create_config - gcloud pubsub topics create test-${RAND} - gsutil mb -l us-east1 gs://test-bucket-${RAND}/ - bq mk test_dataset_${RAND} - fi - - # Per-test setup as per documentation. - get_test_folder_id -} - -function teardown() { - # Global teardown; this is executed once per test file - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - get_test_folder_id - gcloud alpha resource-manager folders delete "${TEST_ORG_FOLDER_NAME}" - gsutil rm -r gs://test-bucket-${RAND}/ - gcloud pubsub topics delete test-topic-${RAND} - bq rm -rf test_dataset_${RAND} - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown as per documentation. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] -} - -@test "Verifying project sinks were created each with the requested destination in deployment ${DEPLOYMENT_NAME}" { - run gcloud logging sinks list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "test-logsink-project-bq-${RAND}" ]] - [[ "$output" =~ "test-logsink-project-pubsub-${RAND}" ]] - [[ "$output" =~ "test-logsink-project-storage-${RAND}" ]] -} - -@test "Verifying organization sinks were created each with a different as the destination in deployment ${DEPLOYMENT_NAME}" { - run gcloud logging sinks list \ - --organization "${CLOUD_FOUNDATION_ORGANIZATION_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "test-logsink-org-bq-${RAND}" ]] - [[ "$output" =~ "test-logsink-org-pubsub-${RAND}" ]] - [[ "$output" =~ "test-logsink-org-storage-${RAND}" ]] -} - -@test "Verifying billing account sinks were created each with a different as the destination in deployment ${DEPLOYMENT_NAME}" { - run gcloud logging sinks list --billing-account \ - "${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "test-logsink-billing-bq-${RAND}" ]] - [[ "$output" =~ "test-logsink-billing-pubsub-${RAND}" ]] - [[ "$output" =~ "test-logsink-billing-storage-${RAND}" ]] -} - -@test "Verifying folder sinks were created each with a different as the destination in deployment ${DEPLOYMENT_NAME}" { - run gcloud logging sinks list --folder "${TEST_ORG_FOLDER_NAME}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ 
"test-logsink-folder-bq-${RAND}" ]] - [[ "$output" =~ "test-logsink-folder-pubsub-${RAND}" ]] - [[ "$output" =~ "test-logsink-folder-storage-${RAND}" ]] -} - -@test "Verifying project sinks and the destination resource were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud logging sinks list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - #[[ "$output" =~ "test-logsink-project-bq-${RAND}" ]] - [[ "$output" =~ "test-logsink-project-pubsub-create-${RAND}" ]] - [[ "$output" =~ "test-logsink-project-storage-create-${RAND}" ]] - - run gcloud beta pubsub topics get-iam-policy \ - "test-logsink-project-pubsub-dest-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "@gcp-sa-logging.iam.gserviceaccount.com" ]] - [[ "$output" =~ "user:${CLOUD_FOUNDATION_USER_ACCOUNT}" ]] - [[ "$output" =~ "role: roles/pubsub.admin" ]] - - run gsutil iam get "gs://test-logsink-project-storage-dest-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "@gcp-sa-logging.iam.gserviceaccount.com" ]] - [[ "$output" =~ "roles/storage.admin" ]] - [[ "$output" =~ "user:${CLOUD_FOUNDATION_USER_ACCOUNT}" ]] - [[ "$output" =~ "roles/storage.objectViewer" ]] - - #TODO: Add test for BQ -} - -@test "Verifying org sinks and the destination resource were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud logging sinks list \ - --organization "${CLOUD_FOUNDATION_ORGANIZATION_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - #[[ "$output" =~ "test-logsink-org-bq-${RAND}" ]] - [[ "$output" =~ "test-logsink-org-pubsub-create-${RAND}" ]] - [[ "$output" =~ "test-logsink-org-storage-create-${RAND}" ]] - - run gcloud beta pubsub topics get-iam-policy "test-logsink-org-pubsub-dest-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ 
"@gcp-sa-logging.iam.gserviceaccount.com" ]] - [[ "$output" =~ "user:${CLOUD_FOUNDATION_USER_ACCOUNT}" ]] - [[ "$output" =~ "role: roles/pubsub.admin" ]] - - run gsutil iam get "gs://test-logsink-org-storage-dest-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "@gcp-sa-logging.iam.gserviceaccount.com" ]] - [[ "$output" =~ "roles/storage.admin" ]] - [[ "$output" =~ "user:${CLOUD_FOUNDATION_USER_ACCOUNT}" ]] - [[ "$output" =~ "roles/storage.objectViewer" ]] - - #TODO: Add test for BQ -} - -@test "Verifying billing sinks and the destination resource were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud logging sinks list \ - --billing-account "${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - #[[ "$output" =~ "test-logsink-billing-bq-${RAND}" ]] - [[ "$output" =~ "test-logsink-billing-pubsub-create-${RAND}" ]] - [[ "$output" =~ "test-logsink-billing-storage-create-${RAND}" ]] - - run gcloud beta pubsub topics get-iam-policy \ - "test-logsink-billing-pubsub-dest-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "@gcp-sa-logging.iam.gserviceaccount.com" ]] - [[ "$output" =~ "user:${CLOUD_FOUNDATION_USER_ACCOUNT}" ]] - [[ "$output" =~ "role: roles/pubsub.admin" ]] - - run gsutil iam get "gs://test-logsink-billing-storage-dest-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "@gcp-sa-logging.iam.gserviceaccount.com" ]] - [[ "$output" =~ "roles/storage.admin" ]] - [[ "$output" =~ "user:${CLOUD_FOUNDATION_USER_ACCOUNT}" ]] - [[ "$output" =~ "roles/storage.objectViewer" ]] - - #TODO: Add test for BQ -} - -@test "Verifying folder sinks and the destination resource were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud logging sinks list --folder "${TEST_ORG_FOLDER_NAME}" - - echo "Status: $status" - echo "Output: $output" - - 
[[ "$status" -eq 0 ]] - #[[ "$output" =~ "test-logsink-folder-bq-${RAND}" ]] - [[ "$output" =~ "test-logsink-folder-pubsub-create-${RAND}" ]] - [[ "$output" =~ "test-logsink-folder-storage-create-${RAND}" ]] - - run gcloud beta pubsub topics get-iam-policy \ - "test-logsink-folder-pubsub-dest-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "@gcp-sa-logging.iam.gserviceaccount.com" ]] - [[ "$output" =~ "user:${CLOUD_FOUNDATION_USER_ACCOUNT}" ]] - [[ "$output" =~ "role: roles/pubsub.admin" ]] - - run gsutil iam get "gs://test-logsink-folder-storage-dest-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "@gcp-sa-logging.iam.gserviceaccount.com" ]] - [[ "$output" =~ "roles/storage.admin" ]] - [[ "$output" =~ "user:${CLOUD_FOUNDATION_USER_ACCOUNT}" ]] - [[ "$output" =~ "roles/storage.objectViewer" ]] - - #TODO: Add test for BQ -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - - run gcloud logging sinks list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - - echo "Status: $status" - echo "Output: $output" - - #[[ ! "$output" =~ "test-logsink-project-bq-${RAND}" ]] - [[ ! "$output" =~ "test-logsink-project-pubsub-${RAND}" ]] - [[ ! "$output" =~ "test-logsink-project-storage-${RAND}" ]] - - run gcloud logging sinks list \ - --organization "${CLOUD_FOUNDATION_ORGANIZATION_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - #[[ ! "$output" =~ "test-logsink-org-bq-${RAND}" ]] - [[ ! "$output" =~ "test-logsink-org-pubsub-${RAND}" ]] - [[ ! "$output" =~ "test-logsink-org-storage-${RAND}" ]] - - # TODO: Bug where billing accounts are not deleted during deployment delete. - # Re-enable this check once its fixed. 
- #run gcloud logging sinks list --billing-account \ - # "${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID}" - #[[ "$status" -eq 0 ]] - #[[ ! "$output" =~ "test-logsink-billing-bq-${RAND}" ]] - #[[ ! "$output" =~ "test-logsink-billing-pubsub-${RAND}" ]] - #[[ ! "$output" =~ "test-logsink-billing-storage-${RAND}" ]] - - run gcloud logging sinks list --folder "${TEST_ORG_FOLDER_NAME}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - #[[ ! "$output" =~ "test-logsink-folder-bq-${RAND}" ]] - [[ ! "$output" =~ "test-logsink-folder-pubsub-${RAND}" ]] - [[ ! "$output" =~ "test-logsink-folder-storage-${RAND}" ]] -} diff --git a/dm/templates/logsink/tests/integration/logsink.yaml b/dm/templates/logsink/tests/integration/logsink.yaml deleted file mode 100644 index 94125b33c22..00000000000 --- a/dm/templates/logsink/tests/integration/logsink.yaml +++ /dev/null @@ -1,311 +0,0 @@ -# Test of the logsink template. -# -# Variables: -# RAND: A random string used by the testing suite. -# - -imports: - - path: templates/logsink/logsink.py - name: logsink.py - -resources: - # Project sink with bucket destination - - name: test-logsink-project-storage-${RAND} - type: logsink.py - properties: - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - destinationName: test-bucket-${RAND} - destinationType: storage - uniqueWriterIdentity: true - - # Project sink with pubsub destination - - name: test-logsink-project-pubsub-${RAND} - type: logsink.py - properties: - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - destinationName: test-topic-${RAND} - destinationType: pubsub - uniqueWriterIdentity: true - - # Project sink with bq destination - - name: test-logsink-project-bq-${RAND} - type: logsink.py - properties: - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - destinationName: test_dataset_${RAND} - destinationType: bigquery - uniqueWriterIdentity: true - - # Organization sink with bucket destination - - name: test-logsink-org-storage-${RAND} - type: logsink.py - properties: - orgId: 
${CLOUD_FOUNDATION_ORGANIZATION_ID} - destinationName: test-bucket-${RAND} - destinationType: storage - uniqueWriterIdentity: true - - # Organization sink with pubsub destination - - name: test-logsink-org-pubsub-${RAND} - type: logsink.py - properties: - orgId: ${CLOUD_FOUNDATION_ORGANIZATION_ID} - destinationName: test-topic-${RAND} - destinationType: pubsub - uniqueWriterIdentity: true - - # Organization sink with bq destination - - name: test-logsink-org-bq-${RAND} - type: logsink.py - properties: - orgId: ${CLOUD_FOUNDATION_ORGANIZATION_ID} - destinationName: test_dataset_${RAND} - destinationType: bigquery - uniqueWriterIdentity: true - - # Billing Account sink with bucket destination - - name: test-logsink-billing-storage-${RAND} - type: logsink.py - properties: - billingAccountId: ${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID} - destinationName: test-bucket-${RAND} - destinationType: storage - uniqueWriterIdentity: true - - # Billing Account sink with pubsub destination - - name: test-logsink-billing-pubsub-${RAND} - type: logsink.py - properties: - billingAccountId: ${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID} - destinationName: test-topic-${RAND} - destinationType: pubsub - uniqueWriterIdentity: true - - # Billing Account sink with bq destination - - name: test-logsink-billing-bq-${RAND} - type: logsink.py - properties: - billingAccountId: ${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID} - destinationName: test_dataset_${RAND} - destinationType: bigquery - uniqueWriterIdentity: true - - # Folder sink with bucket destination - - name: test-logsink-folder-storage-${RAND} - type: logsink.py - properties: - folderId: ${TEST_ORG_FOLDER_NAME} - destinationName: test-bucket-${RAND} - destinationType: storage - uniqueWriterIdentity: true - - # Folder sink with pubsub destination - - name: test-logsink-folder-pubsub-${RAND} - type: logsink.py - properties: - folderId: ${TEST_ORG_FOLDER_NAME} - destinationName: test-topic-${RAND} - destinationType: pubsub - uniqueWriterIdentity: true - - 
# Folder sink with bq destination - - name: test-logsink-folder-bq-${RAND} - type: logsink.py - properties: - folderId: ${TEST_ORG_FOLDER_NAME} - destinationName: test_dataset_${RAND} - destinationType: bigquery - uniqueWriterIdentity: true - - # Project sink creating pubsub destination - - name: test-logsink-project-pubsub-create-${RAND} - type: logsink.py - properties: - destinationName: test-logsink-project-pubsub-dest-${RAND} - destinationType: pubsub - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - uniqueWriterIdentity: true - pubsubProperties: - topic: test-logsink-project-pubsub-topic-dest-${RAND} - accessControl: - - role: roles/pubsub.admin - members: - - user:${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Project sink creating bucket destination - - name: test-logsink-project-storage-create-${RAND} - type: logsink.py - properties: - destinationName: test-logsink-project-storage-dest-${RAND} - destinationType: storage - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - uniqueWriterIdentity: true - storageProperties: - location: us-east1 - bindings: - - role: roles/storage.objectViewer - members: - - user:${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Project sink creating a bq destination - # NOTE: There is a issue where BQ does not accept the uniqueWriter - # returned by the logsink to be used in the userByEmail property. - # Until that is resolved, this property is not supported. 
- # - name: test-logsink-project-bq-create-${RAND} - # type: logsink.py - # properties: - # # BQ names use underscores - # destinationName: test_logsink_project_bq_dest_${RAND} - # destinationType: bigquery - # projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - # uniqueWriterIdentity: true - # bqProperties: - # location: US - # access: - # - role: OWNER - # userByEmail: ${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Organization sink creating pubsub destination - - name: test-logsink-org-pubsub-create-${RAND} - type: logsink.py - properties: - destinationName: test-logsink-org-pubsub-dest-${RAND} - destinationType: pubsub - orgId: ${CLOUD_FOUNDATION_ORGANIZATION_ID} - uniqueWriterIdentity: true - pubsubProperties: - topic: test-logsink-org-pubsub-topic-dest-${RAND} - accessControl: - - role: roles/pubsub.admin - members: - - user:${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Organization sink creating bucket destination - - name: test-logsink-org-storage-create-${RAND} - type: logsink.py - properties: - destinationName: test-logsink-org-storage-dest-${RAND} - destinationType: storage - orgId: ${CLOUD_FOUNDATION_ORGANIZATION_ID} - uniqueWriterIdentity: true - storageProperties: - location: us-east1 - bindings: - - role: roles/storage.objectViewer - members: - - user:${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Organization sink creating bq destination - # NOTE: There is a issue where BQ does not accept the uniqueWriter - # returned by the logsink to be used in the userByEmail property. - # Until that is resolved, this property is not supported. 
- # - name: test-logsink-org-bq-create-${RAND} - # type: logsink.py - # properties: - # # BQ names use underscores - # destinationName: test_logsink_org_bq_dest_${RAND} - # destinationType: bigquery - # orgId: ${CLOUD_FOUNDATION_ORGANIZATION_ID} - # uniqueWriterIdentity: true - # bqProperties: - # location: US - # access: - # - role: OWNER - # userByEmail: ${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Billing Account sink creating pubsub destination - - name: test-logsink-billing-pubsub-create-${RAND} - type: logsink.py - properties: - destinationName: test-logsink-billing-pubsub-dest-${RAND} - destinationType: pubsub - billingAccountId: ${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID} - uniqueWriterIdentity: true - pubsubProperties: - topic: test-logsink-billing-pubsub-topic-dest-${RAND} - accessControl: - - role: roles/pubsub.admin - members: - - user:${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Billing Account sink creating bucket destination - - name: test-logsink-billing-storage-create-${RAND} - type: logsink.py - properties: - destinationName: test-logsink-billing-storage-dest-${RAND} - destinationType: storage - billingAccountId: ${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID} - uniqueWriterIdentity: true - storageProperties: - location: us-east1 - bindings: - - role: roles/storage.objectViewer - members: - - user:${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Billing Account sink creating bq destination - # NOTE: There is a issue where BQ does not accept the uniqueWriter - # returned by the logsink to be used in the userByEmail property. - # Until that is resolved, this property is not supported. 
- # - name: test-logsink-billing-bq-create-${RAND} - # type: logsink.py - # properties: - # # BQ names use underscores - # destinationName: test_logsink_billing_bq_dest_${RAND} - # destinationType: bigquery - # billingAccountId: ${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID} - # uniqueWriterIdentity: true - # bqProperties: - # name: test_logsink_billing_bq_dest_${RAND} - # location: US - # access: - # - role: OWNER - # userByEmail: ${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Folder sink creating pubsub destination - - name: test-logsink-folder-pubsub-create-${RAND} - type: logsink.py - properties: - destinationName: test-logsink-folder-pubsub-dest-${RAND} - destinationType: pubsub - folderId: ${TEST_ORG_FOLDER_NAME} - uniqueWriterIdentity: true - pubsubProperties: - topic: test-logsink-folder-pubsub-topic-dest-${RAND} - accessControl: - - role: roles/pubsub.admin - members: - - user:${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Folder sink creating bucket destination - - name: test-logsink-folder-storage-create-${RAND} - type: logsink.py - properties: - destinationName: test-logsink-folder-storage-dest-${RAND} - destinationType: storage - folderId: ${TEST_ORG_FOLDER_NAME} - uniqueWriterIdentity: true - storageProperties: - location: us-east1 - bindings: - - role: roles/storage.objectViewer - members: - - user:${CLOUD_FOUNDATION_USER_ACCOUNT} - - # Folder sink creating bq destination - # NOTE: There is a issue where BQ does not accept the uniqueWriter - # returned by the logsink to be used in the userByEmail property. - # Until that is resolved, this property is not supported. 
- # - name: test-logsink-folder-bq-create-${RAND} - # type: logsink.py - # properties: - # # BQ names use underscores - # destinationName: test_logsink_folder_bq_dest_${RAND} - # destinationType: bigquery - # folderId: ${TEST_ORG_FOLDER_NAME} - # uniqueWriterIdentity: true - # bqProperties: - # location: US - # access: - # - role: OWNER - # userByEmail: ${CLOUD_FOUNDATION_USER_ACCOUNT} diff --git a/dm/templates/logsink/tests/integration/logsink_external_project.bats b/dm/templates/logsink/tests/integration/logsink_external_project.bats deleted file mode 100644 index 94d35e72a54..00000000000 --- a/dm/templates/logsink/tests/integration/logsink_external_project.bats +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. 
- if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] -} - -@test "Verifying project sinks were created each with the requested destination in deployment ${DEPLOYMENT_NAME}" { - run gcloud logging sinks list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "test-logsink-project-bq-${RAND}" ]] - [[ "$output" =~ "test-logsink-project-pubsub-${RAND}" ]] - [[ "$output" =~ "test-logsink-project-storage-${RAND}" ]] - - run gcloud beta pubsub topics get-iam-policy \ - "projects/${CLOUD_FOUNDATION_PROJECT_ID}/topics/test-topic-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "@gcp-sa-logging.iam.gserviceaccount.com" ]] - [[ "$output" =~ "role: roles/pubsub.admin" ]] - - run gsutil iam get "gs://test-bucket-${RAND}" --project "${TARGET_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "@gcp-sa-logging.iam.gserviceaccount.com" ]] - [[ "$output" =~ "roles/storage.objectAdmin" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - - run gcloud logging sinks list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - - echo "Status: $status" - echo "Output: $output" - - [[ ! "$output" =~ "test-logsink-project-bq-${RAND}" ]] - [[ ! "$output" =~ "test-logsink-project-pubsub-${RAND}" ]] - [[ ! 
"$output" =~ "test-logsink-project-storage-${RAND}" ]] -} diff --git a/dm/templates/logsink/tests/integration/logsink_external_project.yaml b/dm/templates/logsink/tests/integration/logsink_external_project.yaml deleted file mode 100644 index 8958ddaa2d7..00000000000 --- a/dm/templates/logsink/tests/integration/logsink_external_project.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Test of the logsink template for target in external project. -# -# Variables: -# RAND: A random string used by the testing suite. -# - -imports: - - path: templates/logsink/logsink.py - name: logsink.py - -resources: - # Project sink with bucket destination - - name: test-logsink-project-storage-${RAND} - type: logsink.py - properties: - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - destinationName: test-bucket-${RAND} - destinationType: storage - uniqueWriterIdentity: true - destinationProject: ${TARGET_PROJECT_ID} - storageProperties: - location: us-east1 - - # Project sink with pubsub destination - - name: test-logsink-project-pubsub-${RAND} - type: logsink.py - properties: - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - destinationName: test-topic-${RAND} - destinationType: pubsub - uniqueWriterIdentity: true - destinationProject: ${TARGET_PROJECT_ID} - pubsubProperties: - topic: test-topic-${RAND} - - # Project sink with bq destination - - name: test-logsink-project-bq-${RAND} - type: logsink.py - properties: - projectId: ${CLOUD_FOUNDATION_PROJECT_ID} - destinationName: test_dataset_${RAND} - destinationType: bigquery - uniqueWriterIdentity: true - destinationProject: ${TARGET_PROJECT_ID} - bqProperties: - location: US diff --git a/dm/templates/managed_instance_group/README.md b/dm/templates/managed_instance_group/README.md deleted file mode 100644 index 3729acfb85b..00000000000 --- a/dm/templates/managed_instance_group/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Managed Instance Group - -This template creates a managed instance group. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, setup billing, enable requisite APIs](../project/README.md) -- Grant the [compute.admin](https://cloud.google.com/compute/docs/access/iam) IAM role to the [Deployment Manager service account](https://cloud.google.com/deployment-manager/docs/access-control#access_control_for_deployment_manager) - -## Deployment - -### Resources - -- [compute.v1.instance](https://cloud.google.com/compute/docs/reference/latest/instances) -- [compute.v1.autoscaler](https://cloud.google.com/compute/docs/reference/latest/autoscalers) -- [compute.v1.regionalAutoscaler](https://cloud.google.com/compute/docs/reference/latest/regionAutoscalers) -- [compute.v1.instanceTemplate](https://cloud.google.com/compute/docs/reference/latest/instanceTemplates) -- [compute.v1.instanceGroupManager](https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers) -- [compute.v1.regionalInstanceGroupManager](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroupManagers) - -### Properties - -See the `properties` section in the schema file(s): - -- [Managed Instance Group](managed_instance_group.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment, in this - case [examples/managed\_instance\_group.yaml](examples/managed_instance_group.yaml) - -```shell - cp templates/managed_instance_group/examples/managed_instance_group.yaml \ - my_managed_instance_group.yaml -``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. 
- -```shell - vim my_managed_instance_group.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment as described below, replacing - \ with your with your own deployment name - -```shell - gcloud deployment-manager deployments create \ - --config my_managed_instance_group.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Managed Instance Group](examples/managed_instance_group.yaml) -- [Managed Instance Group with Health Check](examples/managed_instance_group_healthcheck.yaml) diff --git a/dm/templates/managed_instance_group/examples/managed_instance_group.yaml b/dm/templates/managed_instance_group/examples/managed_instance_group.yaml deleted file mode 100644 index 637890ff9b0..00000000000 --- a/dm/templates/managed_instance_group/examples/managed_instance_group.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Example of the Managed Instance Group template usage. -# -# In this example, a simple regional managed instance group is created. 
- -imports: - - path: templates/managed_instance_group/managed_instance_group.py - name: managed_instance_group.py - -resources: - - name: managed-instance-group-example - type: managed_instance_group.py - properties: - region: us-central1 - autoscaler: - cpuUtilization: - utilizationTarget: 0.7 - minSize: 1 - targetSize: 3 - instanceTemplate: - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - networks: - - network: default - accessConfigs: - - type: ONE_TO_ONE_NAT - machineType: f1-micro diff --git a/dm/templates/managed_instance_group/examples/managed_instance_group_healthcheck.yaml b/dm/templates/managed_instance_group/examples/managed_instance_group_healthcheck.yaml deleted file mode 100644 index 5d515c534ab..00000000000 --- a/dm/templates/managed_instance_group/examples/managed_instance_group_healthcheck.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Example of the Managed Instance Group template usage. -# -# In this example, a regional managed instance group with attached health check -# and auto-scaling is created. -# -# Pre-requisites steps to run this example: -# Step 1. Create a health check -# Step 2. 
Replace the placeholders as indicated below -# : an URI of existing health check -# typically in the following format: -# "projects/$PROJECT_ID/global/httpHealthChecks/health-check-name" - -imports: - - path: templates/managed_instance_group/managed_instance_group.py - name: managed_instance_group.py - -resources: - - name: managed-instance-group-example-with-hc - type: managed_instance_group.py - properties: - region: us-central1 - autoscaler: - cpuUtilization: - utilizationTarget: 0.7 - targetSize: 3 - instanceTemplate: - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - networks: - - network: default - accessConfigs: - - type: ONE_TO_ONE_NAT - machineType: f1-micro \ No newline at end of file diff --git a/dm/templates/managed_instance_group/managed_instance_group.py b/dm/templates/managed_instance_group/managed_instance_group.py deleted file mode 100644 index 3a7c26dcec5..00000000000 --- a/dm/templates/managed_instance_group/managed_instance_group.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a managed instance group. 
""" - -import copy - -REGIONAL_LOCAL_IGM_TYPES = { - # https://cloud.google.com/compute/docs/reference/rest/v1/regionInstanceGroupManagers - True: 'gcp-types/compute-v1:regionInstanceGroupManagers', - # https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers - False: 'gcp-types/compute-v1:instanceGroupManagers' -} - - -def set_optional_property(receiver, source, property_name): - """ If set, copies the given property value from one object to another. """ - - if property_name in source: - receiver[property_name] = source[property_name] - - -def create_instance_template(properties, name_prefix): - """ Creates an instance template resource. """ - - name = name_prefix + '-it' - - instance_template = { - 'type': 'instance_template.py', - 'name': name, - 'properties': properties - } - - self_link = '$(ref.{}.selfLink)'.format(name) - - return self_link, [instance_template], [ - { - 'name': 'instanceTemplateSelfLink', - 'value': self_link - } - ] - - -def get_instance_template(properties, name_prefix): - """ If an instance template exists, returns a link to that template. - If no instance template exists: - (a) creates that template; - (b) returns a link to it; and - (c) returns resources/outputs that were required to create the template. - """ - - if 'url' in properties: - return properties['url'], [], [] - - return create_instance_template(properties, name_prefix) - - -def create_autoscaler(context, autoscaler_spec, igm): - """ Creates an autoscaler. 
""" - - igm_properties = igm['properties'] - - autoscaler_properties = autoscaler_spec.copy() - name = '{}-autoscaler'.format(context.env['name']) - - autoscaler_properties['project'] = context.properties.get('project', context.env['project']) - - autoscaler_resource = { - 'type': 'autoscaler.py', - 'name': name, - 'properties': autoscaler_properties - } - - # Use IGM's targetSize as maxNumReplicas - autoscaler_properties['maxNumReplicas'] = igm_properties['targetSize'] - - # And rename minSize to minNumReplicas - min_size = autoscaler_properties.pop('minSize') - autoscaler_properties['minNumReplicas'] = min_size - - autoscaler_properties['target'] = '$(ref.{}.selfLink)'.format(context.env['name']) - - for location in ['zone', 'region']: - set_optional_property(autoscaler_properties, igm_properties, location) - - autoscaler_output = { - 'name': 'autoscalerSelfLink', - 'value': '$(ref.{}.selfLink)'.format(name) - } - - return [autoscaler_resource], [autoscaler_output] - - -def get_autoscaler(context, igm): - """ Creates an autoscaler, if necessary. """ - - autoscaler_spec = context.properties.get('autoscaler') - if autoscaler_spec: - return create_autoscaler(context, autoscaler_spec, igm) - - return [], [] - - -def get_igm_outputs(name, igm_properties): - """ Creates Instance Group Manaher (IGM) resource outputs. """ - - location_prop = 'region' if 'region' in igm_properties else 'zone' - - return [ - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(name) - }, - { - 'name': 'name', - 'value': name - }, - { - 'name': 'instanceGroupSelfLink', - 'value': '$(ref.{}.instanceGroup)'.format(name) - }, - { - 'name': location_prop, - 'value': igm_properties[location_prop] - } - ] - - -def dereference_name(reference): - """ Extracts resource name from Deployment Manager reference string. """ - - # Extracting a name from `$(ref.NAME.property)` value results a string - # which starts with `yaml%`. Remove the prefix. 
- return reference.split('.')[1].replace('yaml%', '') - - -def is_reference(candidate): - """ Checks if provided value is Deployment Manager reference string. """ - - return candidate.strip().startswith('$(ref.') - -def get_igm(context, template_link): - """ Creates the IGM resource with its outputs. """ - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - is_regional = 'region' in properties - - igm_properties = { - 'name': name, - 'project': project_id, - 'instanceTemplate': template_link, - } - - igm = { - 'name': context.env['name'], - 'type': REGIONAL_LOCAL_IGM_TYPES[is_regional], - 'properties': igm_properties - } - - known_properties = [ - 'description', - 'distributionPolicy', - 'namedPorts', - 'zone', - 'region', - 'targetSize', - 'baseInstanceName' - ] - - for prop in known_properties: - set_optional_property(igm_properties, properties, prop) - - outputs = get_igm_outputs(context.env['name'], igm_properties) - - return [igm], outputs - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - project_id = properties.get('project', context.env['project']) - - # Instance template - properties['instanceTemplate']['project'] = project_id - template = get_instance_template(properties['instanceTemplate'], context.env['name']) - template_link, template_resources, template_outputs = template - - # Instance group manager - igm_resources, igm_outputs = get_igm(context, template_link) - igm = igm_resources[0] - - # Autoscaler - autoscaler = get_autoscaler(context, igm) - autoscaler_resources, autoscaler_outputs = autoscaler - - return { - 'resources': - igm_resources + template_resources + autoscaler_resources, - 'outputs': - igm_outputs + template_outputs + autoscaler_outputs - } diff --git a/dm/templates/managed_instance_group/managed_instance_group.py.schema b/dm/templates/managed_instance_group/managed_instance_group.py.schema deleted file mode 100644 index 1a7bff654c0..00000000000 --- a/dm/templates/managed_instance_group/managed_instance_group.py.schema +++ /dev/null @@ -1,1006 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Managed Instance Group - author: Sourced Group Inc. - version: 1.1.0 - description: | - Creates a managed instance group with or without an autoscaler. 
- - For more information on this resource: - https://cloud.google.com/compute/docs/instance-groups/ - - APIs endpoints used by this template: - - gcp-types/compute-v1:instanceGroupManagers => - https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers - - gcp-types/compute-v1:regionInstanceGroupManagers => - https://cloud.google.com/compute/docs/reference/rest/v1/regionInstanceGroupManagers - - gcp-types/compute-beta:compute.regionInstanceGroupManagers.setAutoHealingPolicies => - https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/setAutoHealingPolicies - - gcp-types/compute-beta:compute.instanceGroupManagers.setAutoHealingPolicies => - https://cloud.google.com/compute/docs/reference/rest/beta/instanceGroupManagers/setAutoHealingPolicies - -imports: - - path: ../autoscaler/autoscaler.py - name: autoscaler.py - - path: ../instance_template/instance_template.py - name: instance_template.py - -additionalProperties: false - -required: - - targetSize - - instanceTemplate - -oneOf: - - required: - - zone - - required: - - region - -definitions: - hasExternalIp: - type: boolean - default: true - description: | - Defines wether the instance will use an external IP from a shared - ephemeral IP address pool. If this is set to false, the instance - will not have an external IP. - natIP: - type: string - description: | - An external IP address associated with this instance. Specify an unused - static external IP address available to the project or leave this field - undefined to use an IP from a shared ephemeral IP address pool. If you - specify a static external IP address, it must live in the same region - as the zone of the instance. - If hasExternalIp is false this field is ignored. - network: - type: string - description: | - URL of the network resource for this instance. 
When creating an instance, if neither the network - nor the subnetwork is specified, the default network global/networks/default is used; - if the network is not specified but the subnetwork is specified, the network is inferred. - - If you specify this property, you can specify the network as a full or partial URL. - For example, the following are all valid URLs: - - - https://www.googleapis.com/compute/v1/projects/project/global/networks/network - - projects/project/global/networks/network - - global/networks/default - Authorization requires one or more of the following Google IAM permissions on the specified resource network: - - - compute.networks.use - - compute.networks.useExternalIp - subnetwork: - type: string - description: | - The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, - do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. - If the network is in custom subnet mode, specifying the subnetwork is required. - If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - - - https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - - regions/region/subnetworks/subnetwork - Authorization requires one or more of the following Google IAM permissions on the specified resource subnetwork: - - - compute.subnetworks.use - - compute.subnetworks.useExternalIp - networkIP: - type: string - description: | - An IPv4 internal IP address to assign to the instance for this network interface. - If not specified by the user, an unused internal IP is assigned by the system. - -properties: - name: - type: string - description: The name of the managed instance group. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the instance. 
- description: - type: string - description: An optional description of the resource. - zone: - type: string - description: | - The name of the zone where the managed instance group is located - (for zonal resources). - region: - type: string - description: | - The name of the region where the managed instance group is located - (for regional resources). - baseInstanceName: - type: string - description: The base instance name to use for instances in the group. - targetSize: - type: integer - description: | - The target number of running instances for the managed instance group. - If used with an autoscaler, this is the maximum number of instances in - the group. - namedPorts: - type: array - uniqueItems: true - description: | - A list of the named ports configured for the instance groups - complementary to the Instance Group Manager. - items: - type: object - additionalProperties: false - required: - - name - - port - properties: - name: - type: string - description: The port name. - port: - type: integer - minimum: 1 - maximum: 65535 - description: The port number. - instanceTemplate: - type: object - description: | - The instance template specified for this managed instance group. - The template is used to create all new instances in the group. - additionalProperties: false - oneOf: - - allOf: - - required: - - url - - not: - required: - - diskImage - - not: - required: - - network - - not: - required: - - networks - - not: - required: - - natIP - - not: - required: - - subnetwork - - not: - required: - - networkIP - - allOf: - - required: - - diskImage - - oneOf: - - allOf: - - required: - - networks - - properties: - networks: - minItems: 1 - - not: - required: - - network - - not: - required: - - natIP - - not: - required: - - subnetwork - - not: - required: - - networkIP - - allOf: - - required: - - network - - not: - required: - - networks - properties: - url: - type: string - description: The URL of the existing instance template resource. 
- name: - type: string - description: The name of the instance template resource. - templateDescription: - type: string - description: The resource description (optional). - instanceDescription: - type: string - description: | - The description of the instance resource the instance template - will create (optional). - network: - $ref: '#/definitions/network' - subnetwork: - $ref: '#/definitions/subnetwork' - networkIP: - $ref: '#/definitions/networkIP' - hasExternalIp: - $ref: '#/definitions/hasExternalIp' - natIP: - $ref: '#/definitions/natIP' - networks: - type: array - uniqueItems: true - description: | - Networks the instance will be connected to; - e.g., 'my-custom-network' or 'default'. - items: - type: object - additionalProperties: false - required: - - network - properties: - network: - $ref: '#/definitions/network' - subnetwork: - $ref: '#/definitions/subnetwork' - networkIP: - $ref: '#/definitions/networkIP' - aliasIpRanges: - type: array - uniqueItems: true - description: | - An array of alias IP ranges for this network interface. You can only specify this - field for network interfaces in VPC networks. - items: - type: object - additionalProperties: false - properties: - ipCidrRange: - type: string - description: | - The IP alias ranges to allocate for this interface. This IP CIDR range must belong - to the specified subnetwork and cannot contain IP addresses reserved by system or - used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), - a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). - subnetworkRangeName: - type: string - description: | - The name of a subnetwork secondary IP range from which to allocate an IP alias range. - If not specified, the primary range of the subnetwork is used. - accessConfigs: - type: array - uniqueItems: true - description: | - An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, - is supported. 
If there are no accessConfigs specified, then this instance will have no external internet access. - items: - type: object - additionalProperties: false - properties: - type: - type: string - description: | - The type of configuration. The default and only option is ONE_TO_ONE_NAT. - enum: - - ONE_TO_ONE_NAT - name: - type: string - description: | - The name of this access configuration. The default and recommended name is External NAT, - but you can use any arbitrary string, such as My external IP or Network Access. - setPublicPtr: - type: boolean - description: | - Specifies whether a public DNS 'PTR' record should be created to map the external - IP address of the instance to a DNS domain name. - publicPtrDomainName: - type: string - description: | - The DNS domain name for the public PTR record. You can set this field only - if the setPublicPtr field is enabled. - networkTier: - type: string - description: | - This signifies the networking tier used for configuring this access configuration - and can only take the following values: PREMIUM, STANDARD. - - If an AccessConfig is specified without a valid external IP address, an - ephemeral IP will be created with this networkTier. - - If an AccessConfig with a valid external IP address is specified, it must match - that of the networkTier associated with the Address resource owning that IP. - enum: - - STANDARD - - PREMIUM - natIP: - $ref: '#/definitions/natIP' - disks: - type: array - uniqueItems: true - description: | - Array of disks associated with this instance. Persistent disks must be created before you can assign them. - items: - type: object - additionalProperties: false - oneOf: - - required: - - source - - required: - - initializeParams - - allOf: - - not: - required: - - source - - not: - required: - - initializeParams - properties: - type: - type: string - description: | - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. 
- enum: - - SCRATCH - - PERSISTENT - mode: - type: string - description: | - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. - If not specified, the default is to attach the disk in READ_WRITE mode. - enum: - - READ_WRITE - - READ_ONLY - source: - type: string - description: | - Specifies a valid partial or full URL to an existing Persistent Disk resource. - When creating a new instance, one of initializeParams.sourceImage or - disks.source is required except for local SSD. - - If desired, you can also attach existing non-root persistent disks using this property. - This field is only applicable for persistent disks. - - Note that for InstanceTemplate, specify the disk name, not the URL for the disk. - - Authorization requires one or more of the following Google IAM permissions on the specified resource source: - - compute.disks.use - compute.disks.useReadOnly - deviceName: - type: string - description: | - Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* - tree of a Linux operating system running within the instance. This name can be used to reference - the device for mounting, resizing, and so on, from within the instance. - - If not specified, the server chooses a default device name to apply to this disk, in the - form persistent-disk-x, where x is a number assigned by Google Compute Engine. - This field is only applicable for persistent disks. - boot: - type: boolean - description: | - Indicates that this is a boot disk. The virtual machine will use the first partition - of the disk for its root filesystem. - initializeParams: - type: object - additionalProperties: false - description: | - Specifies the parameters for a new disk that will be created alongside the new instance. - Use initialization parameters to create boot disks or local SSDs attached to the new instance. - - This property is mutually exclusive with the source property; you can only define one or the other, but not both. 
- properties: - labels: - type: object - description: | - Labels to apply to this disk. These can be later modified by the disks.setLabels method. - This field is only applicable for persistent disks. - - An object containing a list of "key": value pairs. - Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - - Authorization requires the following Google IAM permission on the specified resource labels: - - compute.disks.setLabels - diskName: - type: string - description: | - Specifies the disk name. If not specified, the default is to use the name of the instance. - If the disk with the instance name exists already in the given zone/region, - a new name will be automatically generated. - sourceImage: - type: string - description: | - The source image to create this disk. When creating a new instance, one of - initializeParams.sourceImage or disks.source is required except for local SSD. - - To create a disk with one of the public operating system images, specify the image by its family name. - For example, specify family/debian-9 to use the latest Debian 9 image: - - projects/debian-cloud/global/images/family/debian-9 - - Alternatively, use a specific version of a public operating system image: - - projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD - - To create a disk with a custom image that you created, specify the image name in the following format: - - global/images/my-custom-image - - You can also specify a custom image by its image family, which returns the latest version of the - image in that family. Replace the image name with family/family-name: - - global/images/family/my-image-family - - If the source image is deleted later, this field will not be set. - - Authorization requires the following Google IAM permission on the specified resource sourceImage: - - compute.images.useReadOnly - description: - type: string - description: | - An optional description. Provide this property when creating the disk. 
- diskSizeGb: - type: number - description: | - Specifies the size of the disk in base-2 GB. - diskType: - type: string - description: | - Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, - specified using the full URL. For example: - - https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard - - Other values include pd-ssd and local-ssd. If you define this field, you can provide either the full - or partial URL. For example, the following are valid values: - - https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType - Note that for InstanceTemplate, this is the name of the disk type, not URL. - enum: - - pd-standard - - pd-ssd - - local-ssd - sourceImageEncryptionKey: - type: object - additionalProperties: false - description: | - The customer-supplied encryption key of the source image. Required if the source image is - protected by a customer-supplied encryption key. - - Instance templates do not store customer-supplied encryption keys, so you cannot create disks - for instances in a managed instance group if the source images are encrypted with your own keys. - properties: - rawKey: - type: string - description: | - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 - to either encrypt or decrypt this resource. - kmsKeyName: - type: string - description: | - The name of the encryption key that is stored in Google Cloud KMS. - sourceSnapshot: - type: string - description: | - The source snapshot to create this disk. When creating a new instance, one of - initializeParams.sourceSnapshot or disks.source is required except for local SSD. - - To create a disk with a snapshot that you created, specify the snapshot name in the following format: - - global/snapshots/my-backup - - If the source snapshot is deleted later, this field will not be set. 
- - Authorization requires the following Google IAM permission on the specified resource sourceSnapshot: - - compute.snapshots.useReadOnly - sourceSnapshotEncryptionKey: - type: object - additionalProperties: false - description: | - The customer-supplied encryption key of the source snapshot. - properties: - rawKey: - type: string - description: | - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 - to either encrypt or decrypt this resource. - kmsKeyName: - type: string - description: | - The name of the encryption key that is stored in Google Cloud KMS. - autoDelete: - type: boolean - description: | - Specifies whether the disk will be auto-deleted when the instance is deleted - (but not when the disk is detached from the instance). - interface: - type: string - description: | - Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. - The default is SCSI. Persistent disks must always use SCSI and the request will fail if you - attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. - For performance characteristics of SCSI over NVMe, see Local SSD performance. - enum: - - SCSI - - NVME - guestOsFeatures: - type: array - uniqueItems: true - description: | - A list of features to enable on the guest operating system. Applicable only for bootable images. - Read Enabling guest operating system features to see a list of available options. - items: - type: object - additionalProperties: false - properties: - type: - type: string - description: | - https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features - The ID of a supported feature. Read Enabling guest operating system features - to see a list of available options. 
- enum: - - MULTI_IP_SUBNET - - SECURE_BOOT - - UEFI_COMPATIBLE - - VIRTIO_SCSI_MULTIQUEUE - - WINDOWS - diskEncryptionKey: - type: object - additionalProperties: false - description: | - The customer-supplied encryption key of the source snapshot. - properties: - rawKey: - type: string - description: | - Encrypts or decrypts a disk using a customer-supplied encryption key. - - If you are creating a new disk, this field encrypts the new disk using an encryption - key that you provide. If you are attaching an existing disk that is already encrypted, - this field decrypts the disk using the customer-supplied encryption key. - - If you encrypt a disk using a customer-supplied key, you must provide the same key again when - you attempt to use this resource at a later time. For example, you must provide the key when - you create a snapshot or an image from the disk or when you attach the disk - to a virtual machine instance. - - If you do not provide an encryption key, then the disk will be encrypted using an automatically - generated key and you do not need to provide a key to use the disk later. - - Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys - to encrypt disks in a managed instance group. - kmsKeyName: - type: string - description: | - The name of the encryption key that is stored in Google Cloud KMS. - scheduling: - type: object - additionalProperties: false - description: | - Sets the scheduling options for this instance. - properties: - onHostMaintenance: - type: string - description: | - Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. - For preemptible instances, the default and only possible behavior is TERMINATE. - For more information, see Setting Instance Scheduling Options. 
- enum: - - MIGRATE - - TERMINATE - automaticRestart: - type: boolean - description: | - Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine - (not terminated by a user). You can only set the automatic restart option for standard instances. - Preemptible instances cannot be automatically restarted. - - By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. - preemptible: - type: boolean - description: | - Defines whether the instance is preemptible. This can only be set during instance creation, - it cannot be set or changed after the instance has been created. - nodeAffinities: - type: array - uniqueItems: true - description: | - A set of node affinity and anti-affinity. - items: - type: object - additionalProperties: false - properties: - key: - type: string - description: | - Corresponds to the label key of Node resource. - operator: - type: string - description: | - Defines the operation of node selection. - values: - type: array - uniqueItems: true - description: | - Corresponds to the label values of Node resource. - items: - type: string - minCpuPlatform: - type: string - description: | - Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms, - such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". - enum: - - Intel Sandy Bridge - - Intel Ivy Bridge - - Intel Haswell - - Intel Broadwell - - Intel Skylake - sourceInstance: - type: string - description: | - The source instance used to create the template. You can provide this as a partial or full URL to the resource. 
- For example, the following are valid values: - - - https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance - - projects/project/zones/zone/instances/instance - - Authorization requires the following Google IAM permission on the specified resource sourceInstance: - - compute.instances.get - sourceInstanceParams: - type: object - additionalProperties: false - description: | - The source instance params to use to create this instance template. - properties: - diskConfigs: - type: array - uniqueItems: true - description: | - Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, - new custom images will be created from each disk. For read-only disks, they will be attached - in read-only mode. Local SSD disks will be created as blank volumes. - items: - type: object - additionalProperties: false - properties: - deviceName: - type: string - description: | - Specifies the device name of the disk to which the configurations apply to. - instantiateFrom: - type: string - description: | - Specifies whether to include the disk and what image to use. Possible values are: - - - source-image: to use the same image that was used to create the source instance's corresponding disk. - Applicable to the boot disk and additional read-write disks. - - source-image-family: to use the same image family that was used to create the source instance's - corresponding disk. Applicable to the boot disk and additional read-write disks. - - custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and - additional read-write disks. - - attach-read-only: to attach a read-only disk. Applicable to read-only disks. - - do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, - local SSDs, and read-only disks. 
- enum: - - source-image - - source-image-family - - custom-image - - attach-read-only - autoDelete: - type: boolean - description: | - Specifies whether the disk will be auto-deleted when the instance is deleted - (but not when the disk is detached from the instance). - customImage: - type: string - description: | - The custom source image to be used to restore this disk when instantiating this instance template. - shieldedInstanceConfig: - type: object - additionalProperties: false - properties: - enableSecureBoot: - type: boolean - description: | - Defines whether the instance has Secure Boot enabled. - enableVtpm: - type: boolean - description: | - Defines whether the instance has the vTPM enabled. - enableIntegrityMonitoring: - type: boolean - description: | - Defines whether the instance has integrity monitoring enabled. - guestAccelerators: - type: array - uniqueItems: true - description: | - A list of the type and count of accelerator cards attached to the instance. - items: - type: object - additionalProperties: false - properties: - acceleratorType: - type: string - description: | - Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 - If you are creating an instance template, specify only the accelerator name. - See GPUs on Compute Engine for a full list of accelerator types. - acceleratorCount: - type: integer - description: | - The number of the guest accelerator cards exposed to this instance. - machineType: - type: string - default: n1-standard-1 - description: | - The Compute Instance type; e.g., 'n1-standard-1'. - See https://cloud.google.com/compute/docs/machine-types for details. - canIpForward: - type: boolean - description: | - Defines whether the instance is allowed to send and receive packets - with non-matching destination or source IPs. 
- diskType: - type: string - default: pd-standard - enum: - - pd-ssd - - pd-standard - - local-ssd - description: Boot disk type. - diskImage: - type: string - description: | - The source image to create the disk. To create the disk with one of - the public operating system images, specify the image by its family - name. - For example, specify family/debian-9 to use the latest Debian 9 - image, projects/debian-cloud/global/images/family/debian-9. - To create a disk with a custom image, specify the image name in the - global/images/my-custom-image format. - See https://cloud.google.com/compute/docs/images for details. - diskSizeGb: - type: integer - minimum: 10 - metadata: - type: object - additionalProperties: false - description: | - Instance metadata. - For example: - metadata: - items: - - key: startup-script - - value: sudo apt-get update - properties: - items: - type: array - uniqueItems: true - description: The metadata key-value pairs. - items: - type: object - additionalProperties: false - properties: - key: - type: string - value: - type: string - serviceAccounts: - type: array - uniqueItems: true - description: | - The list of service accounts, with their specified scopes, authorized - for this instance. Only one service account per VM instance is - supported. - items: - type: object - additionalProperties: false - properties: - email: - type: string - description: The email address of the service account. - scopes: - type: array - uniqueItems: true - description: | - The list of scopes to be made available to the service account. - items: - type: string - description: | - The access scope; - e.g., 'https://www.googleapis.com/auth/compute.readonly'. - See https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam - for details - tags: - type: object - additionalProperties: false - description: | - The list of tags to apply to the instances that are created from the - template. 
The tags identify valid sources or targets for network - firewalls. - properties: - items: - type: array - uniqueItems: true - description: The array of tags. - items: - type: string - labels: - type: object - description: | - The labels to apply to instances created from the template. - Example: - name: wrench - mass: 1.3kg - count: 3 - distributionPolicy: - type: object - additionalProperties: false - description: | - The policy that specifies the intended distribution of instances in a - regional managed instance group. - properties: - zones: - type: array - uniqueItems: true - description: | - A list of zones where the regional managed instance group creates and - manages instances. - items: - type: object - additionalProperties: false - properties: - zone: - type: string - description: | - The URL of the zone. The zone must exist in the region where - the managed instance group is located. - autoscaler: - type: object - additionalProperties: false - description: | - The configuration of the autosaler - a mechanism that automatically - adjusts the number of instances in a group based on the current load. - anyOf: - - required: - - cpuUtilization - - required: - - loadBalancingUtilization - - required: - - customMetricUtilizations - properties: - minSize: - type: integer - default: 1 - minimum: 0 - description: | - The minimum number of replicas the autoscaler can scale down to. - name: - type: string - description: The resource name. - description: - type: string - description: The resource description. - coolDownPeriodSec: - type: integer - default: 60 - description: | - The number of seconds the autoscaler must wait before it starts - collecting information from a new instance. - cpuUtilization: - type: object - additionalProperties: false - description: | - Defines the CPU utilization policy that allows the autoscaler to - scale based on the average CPU utilization of a managed instance - group. 
- required: - - utilizationTarget - properties: - utilizationTarget: - type: number - minimum: 0 - maximum: 1 - description: | - The CPU utilization the autoscaler must maintain (as a target - value). - loadBalancingUtilization: - type: object - additionalProperties: false - required: - - utilizationTarget - description: | - Configuration parameters for autoscaling based on the load balancer. - properties: - utilizationTarget: - type: number - minimum: 0 - maximum: 1 - description: The fraction of the back-end capacity utilization. - customMetricUtilizations: - type: array - uniqueItems: true - description: | - Configuration parameters for autoscaling based on a custom metric. - items: - type: object - additionalProperties: false - required: - - metric - - utilizationTarget - properties: - metric: - type: string - description: | - The identifier (type) of the Stackdriver Monitoring metric. - utilizationTarget: - type: number - description: | - The value of the metric the autoscaler must maintain (as a - target). This must be a positive value. - utilizationTargetType: - type: string - default: GAUGE - enum: - - GAUGE - - DELTA_PER_SECOND - - DELTA_PER_MINUTE - description: | - The option that defines how the target utilization value of the - Stackdriver Monitoring metric is expressed. - -outputs: - name: - type: string - description: The name of the managed instance group manager resource. - selfLink: - type: string - description: | - The URL (SelfLink) of the managed instance group manager resource. - instanceGroupSelfLink: - type: string - description: The URL (SelfLink) of the managed instance group resource. - region: - type: string - description: | - The URL of the region where the managed instance group resides - (for regional resources). - zone: - type: string - description: | - The URL of the zone where the managed instance group is located - (for zonal resources). 
- autoscalerSelfLink: - type: string - description: | - The URL (SelfLink) of the autoscaler resource (if the group is used - with the autoscaler). - instanceTemplateSelfLink: - type: string - description: | - The URL (SelfLink) of the instance template resource (if new instance - template was created for the group). - -documentation: - - templates/managed_instance_group/README.md - -examples: - - templates/managed_instance_group/examples/managed_instance_group.yaml - - templates/managed_instance_group/examples/managed_instance_group_healthcheck.yaml diff --git a/dm/templates/managed_instance_group/tests/integration/managed_instance_group.bats b/dm/templates/managed_instance_group/tests/integration/managed_instance_group.bats deleted file mode 100755 index 9969933b3ab..00000000000 --- a/dm/templates/managed_instance_group/tests/integration/managed_instance_group.bats +++ /dev/null @@ -1,161 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export ZONAL_MIG_NAME="zonal-mig-${RAND}" - export ZONAL_MIG_RES_NAME="mig-${RAND}" - export ZONE="us-central1-c" - export REGION="us-central1" - export AUTOSCALER_NAME="autoscaler-${RAND}" - export COOL_DOWN_PERIOD="70" - export MIN_SIZE="1" - export TARGET_SIZE="2" - export UTILIZATION_TARGET="0.7" - export PORT_NAME="http" - export PORT="80" - export BASE_INSTANCE_NAME="bin-${RAND}" - export INSTANCE_TEMPLATE_NAME="it-${RAND}" - export IT_NETWORK="default" - export IT_BASE_IMAGE="projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts" - export REGIONAL_MIG_NAME="regional-mig-${RAND}" - export HEALTH_CHECK_NAME="test-healthcheck-http-${RAND}" - export SECOND_HEALTH_CHECK_NAME="second-test-healthcheck-http-${RAND}" - export INITIAL_DELAY_SEC="450" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - # Needed for testing resource creation with preexisting (not referenced) - # health check - gcloud compute http-health-checks create "${SECOND_HEALTH_CHECK_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - gcloud compute http-health-checks delete "${SECOND_HEALTH_CHECK_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "$output" - [[ "$status" -eq 0 ]] -} - -@test "Verifying that a zonal intance group was created" { - run gcloud compute instance-groups managed list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "$output" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${ZONAL_MIG_NAME}" ]] - [[ "$output" =~ "${ZONE}" ]] -} - -@test "Verifying regional instance group properties" { - run gcloud compute instance-groups managed list \ - --filter "name=(${REGIONAL_MIG_NAME})" \ - --format "yaml(region)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${REGION}" ]] -} - -@test "Verifying zonal instance group properties" { - run gcloud compute instance-groups managed describe "${ZONAL_MIG_NAME}" \ - --zone "${ZONE}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "baseInstanceName: ${BASE_INSTANCE_NAME}" ]] - [[ "$output" =~ "instanceGroups/${ZONAL_MIG_NAME}" ]] - [[ "$output" =~ "instanceTemplates/${INSTANCE_TEMPLATE_NAME}" ]] - [[ "$output" =~ "name: ${PORT_NAME}" ]] - [[ "$output" =~ "port: ${PORT}" ]] -} - -@test "Verifying autoscaler properties" { - run gcloud compute instance-groups managed describe "${ZONAL_MIG_NAME}" \ - --zone "${ZONE}" --format="yaml(autoscaler)"\ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "cpuUtilization:" ]] - [[ "$output" =~ "utilizationTarget: ${UTILIZATION_TARGET}" ]] - [[ "$output" =~ "coolDownPeriodSec: ${COOL_DOWN_PERIOD}" ]] - [[ "$output" =~ "maxNumReplicas: ${TARGET_SIZE}" ]] - [[ "$output" =~ "minNumReplicas: ${MIN_SIZE}" ]] - [[ "$output" =~ "name: ${AUTOSCALER_NAME}" ]] -} - -@test "Verifying instance template properties" { - run gcloud compute instance-templates describe "${INSTANCE_TEMPLATE_NAME}" \ - 
--project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${IT_BASE_IMAGE}" ]] - [[ "$output" =~ "networks/${IT_NETWORK}" ]] -} - -@test "Verifying regional instance group health check properties" { - run gcloud beta compute instance-groups managed describe \ - "${REGIONAL_MIG_NAME}" --region "${REGION}" \ - --format "yaml(autoHealingPolicies)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${HEALTH_CHECK_NAME}" ]] - [[ "$output" =~ "initialDelaySec: ${INITIAL_DELAY_SEC}" ]] -} - -@test "Verifying zonal instance group health check properties" { - run gcloud beta compute instance-groups managed describe \ - "${ZONAL_MIG_NAME}" --zone "${ZONE}" \ - --format "yaml(autoHealingPolicies)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${HEALTH_CHECK_NAME}" ]] - [[ "$output" =~ "initialDelaySec: ${INITIAL_DELAY_SEC}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - diff --git a/dm/templates/managed_instance_group/tests/integration/managed_instance_group.yaml b/dm/templates/managed_instance_group/tests/integration/managed_instance_group.yaml deleted file mode 100644 index c65f83f97b5..00000000000 --- a/dm/templates/managed_instance_group/tests/integration/managed_instance_group.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# Test of the Managed Instance Group template. 
- -imports: - - path: templates/managed_instance_group/managed_instance_group.py - name: managed_instance_group.py - -resources: - - name: ${ZONAL_MIG_RES_NAME} - type: managed_instance_group.py - properties: - name: ${ZONAL_MIG_NAME} - zone: ${ZONE} - autoscaler: - name: ${AUTOSCALER_NAME} - coolDownPeriodSec: ${COOL_DOWN_PERIOD} - minSize: ${MIN_SIZE} - cpuUtilization: - utilizationTarget: ${UTILIZATION_TARGET} - namedPorts: - - name: ${PORT_NAME} - port: ${PORT} - baseInstanceName: ${BASE_INSTANCE_NAME} - targetSize: ${TARGET_SIZE} - instanceTemplate: - name: ${INSTANCE_TEMPLATE_NAME} - diskImage: ${IT_BASE_IMAGE} - networks: - - network: ${IT_NETWORK} - accessConfigs: - - type: ONE_TO_ONE_NAT - - name: ${REGIONAL_MIG_NAME} - type: managed_instance_group.py - properties: - region: ${REGION} - autoscaler: - cpuUtilization: - utilizationTarget: ${UTILIZATION_TARGET} - targetSize: ${TARGET_SIZE} - instanceTemplate: - url: $(ref.${ZONAL_MIG_RES_NAME}.instanceTemplateSelfLink) - -# Test prerequisites: - - name: ${HEALTH_CHECK_NAME} - type: compute.v1.httpHealthCheck diff --git a/dm/templates/nat_gateway/README.md b/dm/templates/nat_gateway/README.md deleted file mode 100644 index b80c6003f03..00000000000 --- a/dm/templates/nat_gateway/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# HA NAT Gateway - -This template creates a High Availability NAT Gateway based on the number of -regions specified. Each gateway is a managed instance group of one with -auto-healing through healthchecks. The only firewall rule created is for the -instance healthcheck. Any additional traffic you wish to go through the gateway -will require additional firewall rules (for example, TCP/UDP/ICMP, etc.). 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.admin](https://cloud.google.com/compute/docs/access/iam) - IAM role to the [Deployment Manager service account](https://cloud.google.com/deployment-manager/docs/access-control#access_control_for_deployment_manager) -- Grant the [compute.networkAdmin](https://cloud.google.com/compute/docs/access/iam) - IAM role to the [Deployment Manager service account](https://cloud.google.com/deployment-manager/docs/access-control#access_control_for_deployment_manager) -- NOTE: The NAT Gateway integration tests will need additional IAM permissions. The tests will SSH into test instances to verify the NAT functionality. Please refer to [Managing Instance Access Using OS Login](https://cloud.google.com/compute/docs/instances/managing-instance-access#enable_oslogin) and [Connecting through a bastion host](https://cloud.google.com/compute/docs/instances/connecting-advanced#bastion_host) page for additional information. - -## Deployment - -### Resources - -- [compute.v1.addresses](https://cloud.google.com/compute/docs/reference/rest/v1/addresses) -- [compute.v1.instanceTemplate](https://cloud.google.com/compute/docs/reference/latest/instanceTemplates) -- [compute.v1.instanceGroupManagers](https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers) -- [compute.v1.firewalls](https://cloud.google.com/compute/docs/reference/rest/v1/firewalls) -- [compute.v1.routes](https://cloud.google.com/compute/docs/reference/rest/v1/routes) -- [compute.v1.healthChecks](https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks) - -### Properties - -See the `properties` section in the schema file(s): - -- [NAT Gateway](nat_gateway.py.schema) - -### Usage - -1. 
Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment, in this - case [examples/nat\_gateway.yaml](examples/nat_gateway.yaml) - -```shell - cp templates/nat_gateway/examples/nat_gateway.yaml \ - my_nat_gateway.yaml -``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - -```shell - vim my_nat_gateway.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment as described below, replacing - \ with your with your own deployment name - -```shell - gcloud deployment-manager deployments create \ - --config my_nat_gateway.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [NAT Gateway](examples/nat_gateway.yaml) diff --git a/dm/templates/nat_gateway/examples/nat_gateway.yaml b/dm/templates/nat_gateway/examples/nat_gateway.yaml deleted file mode 100644 index 7e2f6ab9be3..00000000000 --- a/dm/templates/nat_gateway/examples/nat_gateway.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Example of the HA NAT gateway template usage. -# -# In this example, HA NAT gateways are created for each zone in a region for -# high availability. 
-# -# Replace the following placeholders with relevant values: -# : A valid VPC network -# : A valid subnetwork - - -imports: - - path: templates/nat_gateway/nat_gateway.py - name: nat-gateway.py - -resources: - - name: nat-gateway - type: nat-gateway.py - properties: - network: - subnetwork: - natIpRange: 10.240.1.0/24 - region: us-east1 - zones: - - us-east1-b - - us-east1-c - - us-east1-d - natGatewayTag: natgw - nattedVmTag: no-ip diff --git a/dm/templates/nat_gateway/nat_gateway.py b/dm/templates/nat_gateway/nat_gateway.py deleted file mode 100644 index 73b09203b68..00000000000 --- a/dm/templates/nat_gateway/nat_gateway.py +++ /dev/null @@ -1,424 +0,0 @@ -# Copyright 2017 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates an HA NAT gateway. 
""" - - -SETUP_NATGATEWAY_SH = """#!/bin/bash -echo 1 > /proc/sys/net/ipv4/ip_forward -sysctl -w net.ipv4.ip_forward=1 -echo "net.ipv4.ip_forward=1" | tee -a /etc/sysctl.conf > /dev/null -iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE -apt-get -y install iptables-persistent -cat < /usr/local/sbin/health-check-server.py -#!/usr/bin/python -from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer -import subprocess -PORT_NUMBER = 80 -PING_HOST = "www.google.com" -def connectivityCheck(): - try: - subprocess.check_call(["ping", "-c", "1", PING_HOST]) - return True - except subprocess.CalledProcessError as e: - return False -#This class will handle any incoming request -class myHandler(BaseHTTPRequestHandler): - def do_GET(self): - if self.path == '/health-check': - if connectivityCheck(): - self.send_response(200) - else: - self.send_response(503) - else: - self.send_response(404) -try: - server = HTTPServer(("", PORT_NUMBER), myHandler) - print "Started httpserver on port " , PORT_NUMBER - #Wait forever for incoming http requests - server.serve_forever() -except KeyboardInterrupt: - print "^C received, shutting down the web server" - server.socket.close() -EOF -nohup python /usr/local/sbin/health-check-server.py >/dev/null 2>&1 & -#register a runtime config variable for a waiter to complete -CONFIG_NAME=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/runtime-config -H "Metadata-Flavor: Google") -VARIABLE_NAME=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/runtime-variable -H "Metadata-Flavor: Google") -gcloud beta runtime-config configs variables set $VARIABLE_NAME 1 --config-name $CONFIG_NAME -""" - -def get_network(project_id, properties): - """ Gets a network name. """ - - network_name = properties.get('network') - is_self_link = '/' in network_name or '.' 
in network_name - - if is_self_link: - network_url = network_name - else: - network_url = 'projects/{}/global/networks/{}'.format(project_id, network_name) - - return network_url - - -def get_subnetwork(project_id, context): - """ Gets a subnetwork name. """ - - subnet_name = context.properties.get('subnetwork') - is_self_link = '/' in subnet_name or '.' in subnet_name - - if is_self_link: - subnet_url = subnet_name - else: - subnet_url = 'projects/{}/regions/{}/subnetworks/{}' - subnet_url = subnet_url.format( - project_id, - context.properties['region'], - subnet_name - ) - - return subnet_url - - -def get_healthcheck(project_id, name): - """ Generate a healthcheck resource. """ - - resource = { - 'name': name, - 'type': 'healthcheck.py', - 'properties': - { - 'healthcheckType': 'HTTP', - 'port': 80, - 'requestPath': '/health-check', - 'healthyThreshold': 1, - 'unhealthyThreshold': 5, - 'checkIntervalSec': 30, - 'project': project_id, - } - } - - return resource - - -def get_firewall(context, project_id, network): - """ Generate a firewall rule for the healthcheck. """ - - # pylint: disable=line-too-long - # See https://cloud.google.com/compute/docs/load-balancing/health-checks#health_check_source_ips_and_firewall_rules. - name = context.env['name'] + '-healthcheck-firewall' - resource = { - 'name': name, - 'type': 'firewall.py', - 'properties': - { - 'project': project_id, - 'networkName': network, - 'rules': - [ - { - 'name': name, - 'allowed': [ - { - 'IPProtocol': 'tcp', - 'ports': ['80'], - } - ], - 'targetTags': [context.properties['natGatewayTag']], - 'description': - 'rule for allowing all health check traffic', - 'sourceRanges': ['130.211.0.0/22', - '35.191.0.0/16'] - } - ] - } - } - - return resource - - -def get_external_internal_ip(project_id, - ip_name, - external_ip_name, - internal_ip_name, - region, - subnet): - - """ Generate an external IP resource. 
""" - - resource = { - 'name': ip_name, - 'type': 'ip_reservation.py', - 'properties': - { - 'ipAddresses': - [ - { - 'name': external_ip_name, - 'project': project_id, - 'ipType': 'REGIONAL', - 'region': region - }, - { - 'name': internal_ip_name, - 'project': project_id, - 'ipType': 'INTERNAL', - 'region': region, - 'subnetwork': subnet - } - ] - } - } - - return resource - - -def get_instance_template(project_id, - context, - instance_template_name, - external_ip, - internal_ip, - network, - subnet): - - """ Generate an instance template resource. """ - - resource = { - 'name': instance_template_name, - 'type': 'instance_template.py', - 'properties': - { - 'project': project_id, - 'natIP': external_ip, - 'network': network, - 'subnetwork': subnet, - 'networkIP': internal_ip, - 'diskImage': context.properties['imageType'], - 'machineType': context.properties['machineType'], - 'canIpForward': True, - 'diskType': context.properties['diskType'], - 'diskSizeGb': context.properties['diskSizeGb'], - 'tags': { - 'items': [context.properties['natGatewayTag']] - }, - 'metadata': - { - 'items': - [ - { - 'key': 'startup-script', - 'value': SETUP_NATGATEWAY_SH - } - ] - }, - } - } - - return resource - - -def get_route(project_id, context, route_name, internal_ip, network): - """ Generate a route resource. """ - - resource = { - 'name': route_name, - 'type': 'route.py', - 'properties': - { - 'project': project_id, - 'network': network, - 'routes': - [ - { - 'name': route_name + '-ip', - 'routeType': 'ipaddress', - 'nextHopIp': internal_ip, - 'destRange': '0.0.0.0/0', - 'priority': context.properties['routePriority'], - 'tags': [context.properties['nattedVmTag']] - } - ] - } - } - - return resource - - -def get_managed_instance_group(project_id, - name, - healthcheck, - instance_template_name, - base_instance_name, - zone): - """ Generate a managed instance group resource. 
""" - - resource = { - 'name': name, - # https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers - 'type': 'gcp-types/compute-v1:instanceGroupManagers', - 'properties': - { - 'project': project_id, - 'instanceTemplate': - '$(ref.' + instance_template_name + '.selfLink)', - 'baseInstanceName': base_instance_name, - 'zone': zone, - 'targetSize': 1, - 'autoHealingPolicies': - [ - { - 'healthCheck': - '$(ref.' + healthcheck + '.selfLink)', - 'initialDelaySec': 120 - } - ] - } - } - - return resource - - -def generate_config(context): - """ Generate the deployment configuration. """ - - resources = [] - prefix = context.env['name'] - hc_name = prefix + '-healthcheck' - region = context.properties['region'] - project_id = context.properties.get('project', context.env['project']) - network_name = get_network(project_id, context.properties) - subnet_name = get_subnetwork(project_id, context) - - # Health check to be used by the managed instance groups. - resources.append(get_healthcheck(project_id, hc_name)) - - # Firewall rule that allows the healthcheck to work. - resources.append(get_firewall(context, project_id, context.properties.get('network'))) - - # Outputs: - out = {} - - # Create a NAT gateway for each zone specified in the zones property. - for zone in context.properties['zones']: - - # Reserve an internal/external static IP address. - ip_name = prefix + '-ip-' + zone - external_ip_name = prefix + '-ip-external-' + zone - internal_ip_name = prefix + '-ip-internal-' + zone - resources.append( - get_external_internal_ip( - project_id, - ip_name, - external_ip_name, - internal_ip_name, - region, - subnet_name - ) - ) - - external_ip = '$(ref.{}.addresses.{}.address)'.format( - ip_name, - external_ip_name - ) - - internal_ip = '$(ref.{}.addresses.{}.address)'.format( - ip_name, - internal_ip_name - ) - - # Create a NAT gateway instance template. 
- instance_template_name = prefix + '-insttempl-' + zone - resources.append( - get_instance_template( - project_id, - context, - instance_template_name, - external_ip, - internal_ip, - network_name, - subnet_name - ) - ) - - # Create an Instance Group Manager for Healthcheck and AutoHealing. - instance_group_manager_name = prefix + '-instgrpmgr-' + zone - base_instance_name = prefix + '-gateway-' + zone - resources.append( - get_managed_instance_group( - project_id, - instance_group_manager_name, - hc_name, - instance_template_name, - base_instance_name, - zone - ) - ) - - # Create a route that will allow to use the NAT gateway VM as a - # next hop. - route_name = prefix + '-route-' + zone - resources.append( - get_route(project_id, - context, - route_name, - internal_ip, - network_name) - ) - - # Set outputs grouped by the MIG name. - out[base_instance_name] = { - 'instanceGroupManagerName': instance_group_manager_name, - 'instanceGroupmanagerSelflink': '$(ref.{}.selfLink)'.format( - instance_group_manager_name - ), - 'externalIP': external_ip, - 'internalIP': internal_ip, - 'instanceTemplateName': instance_template_name, - 'baseInstanceName': base_instance_name, - 'routeName': route_name, - 'zone': zone - } - - outputs = [ - { - 'name': 'natGateways', - 'value': out - }, - { - 'name': 'networkName', - 'value': network_name - }, - { - 'name': 'subnetworkName', - 'value': subnet_name - }, - { - 'name': 'natGatewayTag', - 'value': context.properties['natGatewayTag'] - }, - { - 'name': 'nattedVmTag', - 'value': context.properties['nattedVmTag'] - }, - { - 'name': 'region', - 'value': region - }, - { - 'name': 'healthCheckName', - 'value': hc_name - } - ] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/nat_gateway/nat_gateway.py.schema b/dm/templates/nat_gateway/nat_gateway.py.schema deleted file mode 100644 index 87abe82b469..00000000000 --- a/dm/templates/nat_gateway/nat_gateway.py.schema +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 
2017 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Highly Available NAT Gateway - author: Sourced Group Inc. - version: 1.0.0 - description: | - Supports creation of an HA NAT gateway. Internal network address - translation (NAT) gateway instances can route traffic from internal-only - virtual machine instances to the Internet. This allows you to use a limited - number of external IP address to send traffic from multiple virtual machine - instances while exposing a small set of NAT gateway virtual machines to the - Internet. - - APIs endpoints used by this template: - - gcp-types/compute-v1:instanceGroupManagers => - https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers - -imports: - - path: ../healthcheck/healthcheck.py - name: healthcheck.py - - path: ../instance_template/instance_template.py - name: instance_template.py - - path: ../ip_reservation/ip_reservation.py - name: ip_reservation.py - - path: ../route/route.py - name: route.py - - path: ../firewall/firewall.py - name: firewall.py - -additionalProperties: false - -required: - - network - - subnetwork - - natIpRange - - region - - zones - - natGatewayTag - - nattedVmTag - -properties: - project: - type: string - description: | - The project ID of the project containing the NAT instance. The - Google apps domain is prefixed if applicable. - network: - type: string - description: The VPC network to connect the NAT gateway VMs to. 
- subnetwork: - type: string - description: The subnetwork of the VPC network. - natIpRange: - type: string - description: The IP range to allow NAT through the NAT gateways. - region: - type: string - default: "us-central1" - description: The region where the NAT gateway VMs are deployed. - zones: - type: array - description: | - Zones where the NAT gateway GCE VMs are deployed. - minItems: 2 - items: - type: string - machineType: - type: string - default: "n1-standard-1" - description: The machine type for the NAT gateway VMs. - imageType: - type: string - default: "projects/centos-cloud/global/images/family/centos-6" - description: The image type for the NAT gateway VMs. - startupScript: - type: string - description: | - The startup script that runs when the NAT gateway VMs are started. - diskType: - type: string - default: pd-standard - description: | - The persistent disk type used as a boot disk for the NAT gateway VMs. - enum: - - pd-standard - - pd-ssd - diskSizeGb: - type: number - default: 10 - description: The size of the persistent disk used by the NAT gateway VMs. - minimum: 10 - maximum: 65536 - natGatewayTag: - type: string - defaut: natgw - description: Teh tag applied to the NAT gateway VMs. - nattedVmTag: - type: string - default: no-ip - description: | - The tag to be applied to the GCE VMs so that they can use the NAT - gateways. - routePriority: - type: number - default: 800 - description: | - The priority the routes to the NAT gateway VMs are created with. - minimum: 0 - maximum: 65535 - -outputs: - natGateways: - type: array - description: | - The list of the NAT gateways created. For example, the output can be - referenced as: - $(ref..natGateways..externalIP). - Note that `natgatewaybasename` is the base instance name to use for - instances in the group, and is not the exact name for the deployed - instance. For example, if the `natgatewaybasename` value is `my-nat`, - the provisioned instance name could be `my-nat-xkje`. 
- items: - instanceGroupManagerName: - type: string - description: | - The name of the Instance Group Manager used for monitoring and - autohealing. - instanceGroupManagerSelflink: - type: string - description: The URI of the Instance Group Manager resource. - externalIP: - type: string - description: The external IP addresses set to the NAT gateway VM. - internalIP: - type: string - description: The internal IP addresses set to the NAT gateway VM. - instanceTemplateName: - type: string - description: | - The name of the Instance Template to be used by the Instance - Group Manager for monitoring and autohealing. - baseInstanceName: - type: string - description: The base instance name to use for instances in the group. - routeName: - type: string - description: | - The name of the route that forwards traffic through the NAT - gateways. - zones: - type: array - description: Zones where the NAT gateways are deployed for HA. - networkName: - type: string - description: The VPC network on which the NAT is performed. - subnetworkName: - type: string - description: The NAT'd subnet/IP range. - natGatewayTag: - type: string - description: The tag used to pin the NAT gateway VMs. - nattedVmTag: - type: string - description: The tag used for the internal VMs to be NAT'd. - region: - type: string - description: The region where the NAT gateways are deployed. - healthCheckName: - type: string - description: | - The name of the healthCheck counter used by the Instance Group - Manager. 
- -documentation: - - templates/nat_gateway/README.md - -examples: - - templates/nat_gateway/examples/nat_gateway.yaml diff --git a/dm/templates/nat_gateway/tests/integration/nat_gateway.bats b/dm/templates/nat_gateway/tests/integration/nat_gateway.bats deleted file mode 100755 index 4d522a4fd47..00000000000 --- a/dm/templates/nat_gateway/tests/integration/nat_gateway.bats +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-natgatewayha.txt" -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-natgatewayha-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -export PROJECT_NUMBER=$(gcloud projects describe ${CLOUD_FOUNDATION_PROJECT_ID} | grep projectNumber | sed 's/[^0-9]*//g') - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. 
- if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - gcloud compute networks create "network-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --description "integration test ${RAND}" \ - --subnet-mode custom - gcloud compute networks subnets create "subnet-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network "network-${RAND}" \ - --range 10.0.1.0/24 \ - --region us-east1 - create_config - fi - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud compute networks subnets delete "subnet-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute networks delete "network-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - delete_config - rm -f "${RANDOM_FILE}" - fi - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - - - # Enabling OS login for the next tests - run gcloud compute instances add-metadata "test-inst-has-ext-ip-${RAND}" \ - --metadata enable-oslogin=TRUE \ - --zone "us-east1-b" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Pre-run Status: $status" - echo "Pre-run Output: $output" - - [[ "$status" -eq 0 ]] - - run gcloud compute ssh "test-inst-has-ext-ip-${RAND}" --zone "us-east1-b" \ - --command "echo 'OK' " \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "SSH Status: $status" - echo "SSH Output: $output" - - echo "sleeping 30" - sleep 30 - - [[ "$status" -eq 0 ]] -} - -@test "Verifying that resources were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute instances list --filter="name:test-nat-gateway-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ 
"test-nat-gateway-${RAND}-gateway-us-east1-b" ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-gateway-us-east1-c" ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-gateway-us-east1-d" ]] -} - -@test "Verifying that external IP was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute addresses list \ - --filter="name:test-nat-gateway-${RAND}-ip-external" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-ip-external-us-east1-b" ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-ip-external-us-east1-c" ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-ip-external-us-east1-d" ]] -} - -@test "Verifying that internal IP was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute addresses list \ - --filter="name:test-nat-gateway-${RAND}-ip-internal" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-ip-internal-us-east1-b" ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-ip-internal-us-east1-c" ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-ip-internal-us-east1-d" ]] -} - -@test "Verifying that routes were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute routes list \ - --filter="name:test-nat-gateway-${RAND}-route" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-route-us-east1-b" ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-route-us-east1-c" ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-route-us-east1-d" ]] -} - -@test "Verifying that firewall rule was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute firewall-rules list \ - --filter="name:test-nat-gateway-${RAND}-healthcheck-firewall" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "test-nat-gateway-${RAND}-healthcheck-firewall" ]] -} - -@test "Verifying NAT functionality created in deployment ${DEPLOYMENT_NAME}" { - # SSH into the instance with 
external IP and SSH into the instance without - # an external IP that is using the NAT gateway and successfully execute - # wget on a site. - run gcloud compute ssh "test-inst-has-ext-ip-${RAND}" --zone "us-east1-b" \ - --ssh-flag="-q" \ - --command "gcloud compute ssh test-inst-nat-no-ext-ip-${RAND} \ - --internal-ip --command 'wget google.com' --zone 'us-east1-b' \ - --quiet" \ - --quiet - - echo "status = ${status}" - echo "output = ${output}" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "HTTP request sent, awaiting response... 200 OK" ]] - - # SSH into the instance with external IP and SSH into the instance without - # an external IP that is not using the NAT gateway. The wget command will - # fail. - run gcloud compute ssh "test-inst-has-ext-ip-${RAND}" --zone "us-east1-b" \ - --ssh-flag="-q" \ - --command "gcloud compute ssh test-inst-no-ext-ip-${RAND} --internal-ip \ - --command 'wget google.com --timeout=5' --zone 'us-east1-b' \ - --quiet" \ - --quiet - - echo "status = ${status}" - echo "output = ${output}" - - [[ "$output" =~ "failed: Network is unreachable" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - - run gcloud compute instances list \ - --filter="name:test-nat-gateway-${RAND}-gw-1-us-east1-b" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-gateway-us-east1-b" ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-gateway-us-east1-c" ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-gateway-us-east1-d" ]] - - run gcloud compute addresses list \ - --filter="name:test-nat-gateway-${RAND}-ip" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-ip-external-us-east1-b" ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-ip-external-us-east1-c" ]] - [[ ! 
"$output" =~ "test-nat-gateway-${RAND}-ip-external-us-east1-d" ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-ip-internal-us-east1-b" ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-ip-internal-us-east1-c" ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-ip-internal-us-east1-d" ]] - - run gcloud compute routes list \ - --filter="name:test-nat-gateway-${RAND}-route" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-route-us-east1-b" ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-route-us-east1-c" ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-route-us-east1-d" ]] - - run gcloud compute firewall-rules list \ - --filter="name:test-nat-gateway-${RAND}-healthcheck-firewall" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ ! "$output" =~ "test-nat-gateway-${RAND}-healthcheck-firewall" ]] -} diff --git a/dm/templates/nat_gateway/tests/integration/nat_gateway.yaml b/dm/templates/nat_gateway/tests/integration/nat_gateway.yaml deleted file mode 100644 index 7f95c0fd127..00000000000 --- a/dm/templates/nat_gateway/tests/integration/nat_gateway.yaml +++ /dev/null @@ -1,106 +0,0 @@ -# Test of the HA NAT gateway template. - -imports: - - path: templates/nat_gateway/nat_gateway.py - name: nat_gateway.py - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: test-nat-gateway-${RAND} - type: nat_gateway.py - properties: - network: network-${RAND} - subnetwork: subnet-${RAND} - natIpRange: 10.0.1.0/24 - region: us-east1 - zones: - - us-east1-b - - us-east1-c - - us-east1-d - natGatewayTag: natgw - nattedVmTag: no-ip - -# Test prerequisites: - - # Instance using NAT with no external IP. 
- - name: test-inst-nat-no-ext-ip-${RAND} - type: instance.py - properties: - zone: us-east1-b - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - canIpForward: true - network: network-${RAND} - subnetwork: regions/us-east1/subnetworks/subnet-${RAND} - serviceAccounts: - - email: ${PROJECT_NUMBER}-compute@developer.gserviceaccount.com - scopes: - - "https://www.googleapis.com/auth/cloud-platform" - hasExternalIp: false - tags: - items: - - no-ip - - # Instance not using NAT with no external IP. - - name: test-inst-no-ext-ip-${RAND} - type: instance.py - properties: - zone: us-east1-b - diskImage: projects/debian-cloud/global/images/debian-9-stretch-v20181011 - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - canIpForward: true - network: network-${RAND} - subnetwork: regions/us-east1/subnetworks/subnet-${RAND} - serviceAccounts: - - email: ${PROJECT_NUMBER}-compute@developer.gserviceaccount.com - scopes: - - "https://www.googleapis.com/auth/cloud-platform" - hasExternalIp: false - - # Instance with external IP. 
- - name: test-inst-has-ext-ip-${RAND} - type: instance.py - properties: - zone: us-east1-b - diskImage: projects/debian-cloud/global/images/debian-9-stretch-v20181011 - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - canIpForward: true - network: network-${RAND} - subnetwork: regions/us-east1/subnetworks/subnet-${RAND} - serviceAccounts: - - email: ${PROJECT_NUMBER}-compute@developer.gserviceaccount.com - scopes: - - "https://www.googleapis.com/auth/cloud-platform" - - # Firewall rules for ICMP and SSH - - name: test-firewall-${RAND} - type: firewall.py - properties: - network: network-${RAND} - rules: - - name: test-icmp-${RAND} - allowed: - - IPProtocol: icmp - description: Allow all traffic out through NAT GW - direction: INGRESS - priority: 800 - sourceRanges: - - 10.0.1.0/24 - - name: test-ssh-http-${RAND} - allowed: - - IPProtocol: tcp - ports: - - "22" - - "80" - description: rule for ssh and http - direction: INGRESS - priority: 800 - sourceRanges: - - 0.0.0.0/0 diff --git a/dm/templates/network/README.md b/dm/templates/network/README.md deleted file mode 100644 index 7c6b30da31e..00000000000 --- a/dm/templates/network/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Network and subnetwork - -This template creates a network, optionally with subnetworks. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.networkAdmin or compute.admin](https://cloud.google.com/compute/docs/access/iam) IAM role to the project service account - -## Deployment - -### Resources - -- [compute.v1.network](https://cloud.google.com/compute/docs/reference/latest/networks) -- [compute.v1.subnetwork](https://cloud.google.com/compute/docs/reference/latest/subnetworks) - -### Properties - -See the `properties` section in the schema file(s): - -- [Network](network.py.schema) -- [Subnetwork](subnetwork.py.schema) - - -### Usage - - -1. 
Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/network.yaml](examples/network.yaml): - -```shell - cp templates/network/examples/network.yaml my_network.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_network.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_network.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Network with subnetworks](examples/network.yaml) diff --git a/dm/templates/network/examples/network.yaml b/dm/templates/network/examples/network.yaml deleted file mode 100644 index 4a3674536fd..00000000000 --- a/dm/templates/network/examples/network.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Example of the network template usage. -# -# In this example, a network is created. Subnetworks can be created -# via the `subnetworks` property of the `network.py` template (such as -# test-subnetwork-1) -# -# Note that the `name` property of the `templates/network.py` resource type is -# optional. If omitted, the DM resource name is used. 
- -imports: - - path: templates/network/network.py - name: network.py - -resources: - - name: test-network - type: network.py - properties: - autoCreateSubnetworks: false - subnetworks: - - name: test-subnetwork-1 - region: us-east1 - ipCidrRange: 10.0.0.0/24 - privateIpGoogleAccess: false - enableFlowLogs: true - secondaryIpRanges: - - rangeName: my-secondary-range-1 - ipCidrRange: 10.0.1.0/24 - - rangeName: my-secondary-range-2 - ipCidrRange: 10.0.2.0/24 - - name: test-subnetwork-2 - region: us-east1 - ipCidrRange: 192.168.0.0/24 diff --git a/dm/templates/network/network.py b/dm/templates/network/network.py deleted file mode 100644 index 1793a0251fa..00000000000 --- a/dm/templates/network/network.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a network, optionally with subnetworks. """ - - -def append_optional_property(res, properties, prop_name): - """ If the property is set, it is added to the resource. """ - - val = properties.get(prop_name) - if val: - res['properties'][prop_name] = val - return - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - name = properties.get('name', context.env['name']) - network_self_link = '$(ref.{}.selfLink)'.format(context.env['name']) - - network_resource = { - # https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert - 'type': 'gcp-types/compute-v1:networks', - 'name': context.env['name'], - 'properties': - { - 'name': name, - 'autoCreateSubnetworks': properties.get('autoCreateSubnetworks', False) - } - } - optional_properties = [ - 'description', - 'routingConfig', - 'project', - ] - for prop in optional_properties: - append_optional_property(network_resource, properties, prop) - resources = [network_resource] - - # Subnetworks: - out = {} - for i, subnetwork in enumerate( - properties.get('subnetworks', []), 1 - ): - subnetwork['network'] = network_self_link - if properties.get('project'): - subnetwork['project'] = properties.get('project') - - subnetwork_name = 'subnetwork-{}'.format(i) - resources.append( - { - 'name': subnetwork_name, - 'type': 'subnetwork.py', - 'properties': subnetwork - } - ) - - out[subnetwork_name] = { - 'selfLink': '$(ref.{}.selfLink)'.format(subnetwork_name), - 'ipCidrRange': '$(ref.{}.ipCidrRange)'.format(subnetwork_name), - 'region': '$(ref.{}.region)'.format(subnetwork_name), - 'network': '$(ref.{}.network)'.format(subnetwork_name), - 'gatewayAddress': '$(ref.{}.gatewayAddress)'.format(subnetwork_name) - } - - return { - 'resources': - resources, - 'outputs': - [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'selfLink', - 'value': network_self_link - }, - { - 'name': 'subnetworks', - 'value': out - } - ] - } diff --git a/dm/templates/network/network.py.schema b/dm/templates/network/network.py.schema deleted file mode 100644 index 22f4cb3bbec..00000000000 --- a/dm/templates/network/network.py.schema +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Network - author: Sourced Group Inc. - version: 1.1.1 - description: | - Creates a network. - - For more information on this resource: - - https://cloud.google.com/vpc/docs/vpc - - APIs endpoints used by this template: - - gcp-types/compute-v1:networks => - https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert - -imports: - - path: subnetwork.py - -additionalProperties: false - -oneOf: - - properties: - autoCreateSubnetworks: - enum: - - true - - properties: - subnetworks: - type: array - default: [] - minItems: 1 - - properties: - autoCreateSubnetworks: - enum: - - false - subnetworks: - type: array - default: [] - minItems: 0 - maxItems: 0 - -properties: - name: - type: string - description: | - Name of the network resource. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the Cloud Router instance. The - Google apps domain is prefixed if applicable. - description: - type: string - description: | - An optional description of this resource. Provide this property when you create the resource. - routingConfig: - type: object - additionalProperties: false - description: | - The network-level routing configuration for this network. Used by Cloud Router to determine what type - of network-wide routing behavior to enforce. 
- required: - - routingMode - properties: - routingMode: - type: string - description: | - The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise - routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's - cloud routers will advertise routes with all subnets of this network, across regions. - enum: - - GLOBAL - - REGIONAL - autoCreateSubnetworks: - type: boolean - default: false - description: | - If "true": (a) the newly created network is assigned the default CIDR of - 10.128.0.0/9; and (b) one subnetwork per region is created automatically. - subnetworks: - type: array - default: [] - description: | - An array of subnetworks, as defined in the `subnetwork.py` template. - Example: - - name: test-subnetwork-1 - region: us-east1 - ipCidrRange: 10.116.48.0/22 - privateIpGoogleAccess: false - enableFlowLogs: true - secondaryIpRanges: - - rangeName: my-secondary-range-1 - ipCidrRange: 172.16.0.0/24 - - rangeName: my-secondary-range-2 - ipCidrRange: 172.16.1.0/24 - items: - type: object - allOf: - - not: - required: - - project - - not: - required: - - network - -outputs: - name: - type: string - description: The network resource name. - selfLink: - type: string - description: The URI (SelfLink) of the network resource. - subnetworks: - type: array - description: Array of subnetwork` information. - items: - description: | - The name of the subnetwork resource. For example, the output can be - referenced as: $(ref..subnetworks..selfLink) - patternProperties: - ".*": - type: object - description: Details for a subnetwork resource. - properties: - - selfLink: - type: string - description: The URI (SelfLink) of the subnet resource. - - region: - type: string - description: The name of the region where the subnetwork resides. - - network: - type: string - description: The URL of the network to which the subnetwork belongs. 
- - ipCidrRange: - type: string - description: | - The range of internal addresses owned by the subnetwork. - - gatewayAddress: - type: string - description: | - The gateway address for default routes to reach destination addresses - outside this subnetwork. - -documentation: - - templates/network/README.md - -examples: - - templates/network/examples/network.yaml diff --git a/dm/templates/network/subnetwork.py b/dm/templates/network/subnetwork.py deleted file mode 100644 index a2bc81ede03..00000000000 --- a/dm/templates/network/subnetwork.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a subnetwork. """ - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - props = context.properties - props['name'] = props.get('name', context.env['name']) - required_properties = ['name', 'network', 'ipCidrRange', 'region'] - optional_properties = [ - 'project', - 'enableFlowLogs', - 'privateIpGoogleAccess', - 'secondaryIpRanges' - ] - - # Load the mandatory properties, then the optional ones (if specified). 
- properties = {p: props[p] for p in required_properties} - properties.update( - { - p: props[p] - for p in optional_properties - if p in props - } - ) - - resources = [ - { - # https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks/insert - 'type': 'gcp-types/compute-v1:subnetworks', - 'name': context.env['name'], - 'properties': properties - } - ] - - output = [ - { - 'name': 'name', - 'value': properties['name'] - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - }, - { - 'name': 'ipCidrRange', - 'value': '$(ref.{}.ipCidrRange)'.format(context.env['name']) - }, - { - 'name': 'region', - 'value': '$(ref.{}.region)'.format(context.env['name']) - }, - { - 'name': 'network', - 'value': '$(ref.{}.network)'.format(context.env['name']) - }, - { - 'name': 'gatewayAddress', - 'value': '$(ref.{}.gatewayAddress)'.format(context.env['name']) - } - ] - - return {'resources': resources, 'outputs': output} diff --git a/dm/templates/network/subnetwork.py.schema b/dm/templates/network/subnetwork.py.schema deleted file mode 100644 index 4e8ad81eded..00000000000 --- a/dm/templates/network/subnetwork.py.schema +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Subnet - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates a subnetwork. 
- - For more information on this resource: - - https://cloud.google.com/vpc/docs/vpc - - APIs endpoints used by this template: - - gcp-types/compute-v1:subnetworks => - https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks/insert - -additionalProperties: false - -required: - - network - - region - - ipCidrRange - -properties: - name: - type: string - description: | - The name of the resource, provided by the client when initially creating the resource. The name must - be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and - match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase - letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, - which cannot be a dash. - If not specified, the DM resource name is used. - project: - type: string - description: | - The project ID of the project containing the Cloud Router instance. The - Google apps domain is prefixed if applicable. - network: - type: string - description: | - The URL of the network to which the subnetwork belongs. For example: - - projects//global/networks/ - region: - type: string - description: The name of the region where the subnetwork resides. - ipCidrRange: - type: string - pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]{1,2}$ - description: | - The range of internal addresses owned by the subnetwork. - Ranges must be unique and non-overlapping within a network. - Only IPv4 is supported. For example, 10.0.0.0/8 or 192.168.0.0/16. - privateIpGoogleAccess: - type: boolean - default: true - description: | - Defines whether the VMs in this subnetwork can access Google services - without assigned external IP addresses. This field can be either set at the - resource creation time or updated using setPrivateIpGoogleAccess. 
- secondaryIpRanges: - type: array - description: | - An array of configurations for the secondary IP ranges of VM instances - contained in this subnetwork. The primary IP of a VM must belong to the - primary ipCidrRange of the subnetwork. The alias IPs may belong to either - primary or secondary ranges. For example: - - rangeName: my-secondary-range-1 - ipCidrRange: 172.16.0.0/24 - - rangeName: my-secondary-range-2 - ipCidrRange: 172.16.1.0/24 - items: - type: object - additionalProperties: false - required: - - rangeName - - ipCidrRange - properties: - rangeName: - type: string - description: | - The name associated with this subnetwork secondary range, used when adding an alias IP range - to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. - The name must be unique within the subnetwork. - ipCidrRange: - type: string - description: | - The range of IP addresses belonging to this subnetwork secondary range. Provide this property - when you create the subnetwork. Ranges must be unique and non-overlapping with all primary - and secondary IP ranges within a network. Only IPv4 is supported. - enableFlowLogs: - type: boolean - description: If "true", enables flow logging for the subnetwork. - -outputs: - name: - type: string - description: The subnet resource name. - selfLink: - type: string - description: The URI (SelfLink) of the subnet resource. - region: - type: string - description: The name of the region where the subnetwork resides. - network: - type: string - description: The URL of the network to which the subnetwork belongs. - ipCidrRange: - type: string - description: | - The range of internal addresses owned by the subnetwork. - gatewayAddress: - type: string - description: | - The gateway address for default routes to reach destination addresses - outside this subnetwork. 
- -documentation: - - templates/network/README.md - -examples: - - templates/network/examples/network.yaml diff --git a/dm/templates/network/tests/integration/network.bats b/dm/templates/network/tests/integration/network.bats deleted file mode 100644 index 8f8fc80d62d..00000000000 --- a/dm/templates/network/tests/integration/network.bats +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-network.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-network-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < templates/network/tests/integration/network.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that resources were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute networks list --filter="name:test-network-${RAND}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "test-network-${RAND}" ]] -} - -@test "Verifying subnets were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute networks subnets list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "test-subnetwork-${RAND}-1" ]] - [[ "$output" =~ "test-subnetwork-${RAND}-2" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - run gcloud compute networks list --filter="name:test-network-${RAND}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-network-${RAND}" ]] - - run gcloud compute networks subnets list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-subnetwork-${RAND}-1" ]] - [[ ! "$output" =~ "test-subnetwork-${RAND}-2" ]] -} diff --git a/dm/templates/network/tests/integration/network.yaml b/dm/templates/network/tests/integration/network.yaml deleted file mode 100644 index d4dec03b8a5..00000000000 --- a/dm/templates/network/tests/integration/network.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# Test of the network and subnetwork templates. -# -# Variables: -# RAND: A random string used by the testing suite. 
-# - -imports: - - path: templates/network/network.py - name: network.py - - path: templates/network/subnetwork.py - name: subnetwork.py - -resources: - - name: test-network-${RAND} - type: network.py - properties: - autoCreateSubnetworks: false - subnetworks: - - name: test-subnetwork-${RAND}-1 - region: us-east1 - ipCidrRange: 10.0.0.0/24 - privateIpGoogleAccess: false - enableFlowLogs: true - secondaryIpRanges: - - rangeName: my-secondary-range-${RAND}-1 - ipCidrRange: 10.0.1.0/24 - - rangeName: my-secondary-range-${RAND}-2 - ipCidrRange: 10.0.2.0/24 - - name: test-subnetwork-${RAND}-2 - type: subnetwork.py - properties: - network: $(ref.test-network-${RAND}.selfLink) - region: us-east1 - ipCidrRange: 192.168.0.0/16 - - name: test-subnetwork-${RAND}-3 - type: subnetwork.py - properties: - name: test-subnetwork-${RAND}-3 - network: $(ref.test-network-${RAND}.selfLink) - region: us-east1 - ipCidrRange: 172.16.0.0/24 - privateIpGoogleAccess: false - enableFlowLogs: true - secondaryIpRanges: - - rangeName: my-secondary-range-${RAND}-1 - ipCidrRange: 172.16.1.0/24 - - rangeName: my-secondary-range-${RAND}-2 - ipCidrRange: 172.16.2.0/24 diff --git a/dm/templates/network/tests/schemas/invalid_subnets.yaml b/dm/templates/network/tests/schemas/invalid_subnets.yaml deleted file mode 100644 index d60702cf438..00000000000 --- a/dm/templates/network/tests/schemas/invalid_subnets.yaml +++ /dev/null @@ -1,15 +0,0 @@ -autoCreateSubnetworks: true -subnetworks: - - name: test-subnetwork-1 - region: us-east1 - ipCidrRange: 10.0.0.0/24 - privateIpGoogleAccess: false - enableFlowLogs: true - secondaryIpRanges: - - rangeName: my-secondary-range-1 - ipCidrRange: 10.0.1.0/24 - - rangeName: my-secondary-range-2 - ipCidrRange: 10.0.2.0/24 - - name: test-subnetwork-2 - region: us-east1 - ipCidrRange: 192.168.0.0/24 diff --git a/dm/templates/network/tests/schemas/valid_auto.yaml b/dm/templates/network/tests/schemas/valid_auto.yaml deleted file mode 100644 index 3de5b553e29..00000000000 --- 
a/dm/templates/network/tests/schemas/valid_auto.yaml +++ /dev/null @@ -1 +0,0 @@ -autoCreateSubnetworks: true diff --git a/dm/templates/network/tests/schemas/valid_subnets.yaml b/dm/templates/network/tests/schemas/valid_subnets.yaml deleted file mode 100644 index 0bcd684fc5f..00000000000 --- a/dm/templates/network/tests/schemas/valid_subnets.yaml +++ /dev/null @@ -1,15 +0,0 @@ -autoCreateSubnetworks: false -subnetworks: - - name: test-subnetwork-1 - region: us-east1 - ipCidrRange: 10.0.0.0/24 - privateIpGoogleAccess: false - enableFlowLogs: true - secondaryIpRanges: - - rangeName: my-secondary-range-1 - ipCidrRange: 10.0.1.0/24 - - rangeName: my-secondary-range-2 - ipCidrRange: 10.0.2.0/24 - - name: test-subnetwork-2 - region: us-east1 - ipCidrRange: 192.168.0.0/24 diff --git a/dm/templates/project/README.md b/dm/templates/project/README.md deleted file mode 100644 index 2f927a236ef..00000000000 --- a/dm/templates/project/README.md +++ /dev/null @@ -1,138 +0,0 @@ -# Project - -This template: - -1. Creates a new project. -2. Sets a billing account for the new project -3. Sets IAM permissions in the new project -4. Turns on a set of APIs in the new project -5. Creates service accounts for the new project -6. Creates a usage export Cloud Storage bucket for the new project -7. Removed default networks, firewalls -8. Removes default Service Account -9. Creates VPC host or attached VPC service project - -## Prerequisites - -Following are the prerequisites for creating a project via Deployment Manager. You can perform some of the steps via the Cloud Console at https://console.cloud.google.com/. The `gcloud` command line tool is used to deploy the configs. - -`Note:` Permission changes can take up to 20 minutes to propagate. If you run commands before the propagation is completed, you may receive errors regarding the user not having permissions. 
- -`Note:` "If you have [Shared VPC Admin role](https://cloud.google.com/vpc/docs/provisioning-shared-vpc#enable-shared-vpc-host) at the folder level, you need to use gcloud beta or the beta API." Some version of the Project Factory is using the GA API, which means SharedVPC features may result a permission error. See [Issue #403](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/403) - - -1. Install [gcloud](https://cloud.google.com/sdk). - -2. Create a project that will create and own the deployments (henceforth referred to as *DM Creation Project*). See: https://cloud.google.com/resource-manager/docs/creating-managing-organization. - - `Important:` Because of the special permissions granted to the *DM Creation Project*, it should not be used for any purpose other than creating other projects. - -3. Activate the following APIs for the *DM Creation Project*: - * Google Cloud Deployment Manager V2 API - * Google Cloud Resource Manager API - * Google Cloud Billing API - * Google Identity and Access Management (IAM) API - * Google Service Management API - - You may use the `gcloud services enable` command to do this: - - ```shell - gcloud services enable deploymentmanager.googleapis.com - gcloud services enable cloudresourcemanager.googleapis.com - gcloud services enable cloudbilling.googleapis.com - gcloud services enable iam.googleapis.com - gcloud services enable servicemanagement.googleapis.com - ``` - -4. Find the *Cloud Services* service account associated with the *DM Creation Project*. - - It is formatted as `@cloudservices.gserviceaccount.com`, - and is listed under [IAM & Admin](https://console.cloud.google.com/iam-admin/iam) - in Google Cloud Console. This account is henceforth referred to as the *DM Service Account*. See https://cloud.google.com/resource-manager/docs/access-control-proj. - -5. Create an Organization node. 
- - If you do not already have an Organization node under which you can create - projects, create that node following [these instructions](https://cloud.google.com/resource-manager/docs/creating-managing-organization). - -6. Grant the *DM Service Account* the following permissions on the Organization node: - - - `roles/resourcemanager.projectCreator` - - `roles/serviceusage.serviceUsageAdmin` - - This is visible in the Cloud Console's IAM permissions in *Resource Manager -> Project Creator* and *Resource Manager -> Service Usage Admin*. See https://cloud.google.com/resource-manager/docs/access-control-proj. - -7. Create/find the *Billing Account* associated with the Organization. See: https://cloud.google.com/support/billing/. Take note of the *Billing Account*'s ID, which is formatted as follows:`00E12A-0AB8B2-078CE8`. - -8. Give the *DM Service Account* the following permissions on the *Billing Account*: `roles/billing.user`. This is visible in Cloud Console's IAM permissions in *Billing -> Billing Account User*. - -9. If the project is a VPC host or guest project, give the *DM Service Account* the following permissions: `roles/compute.xpnAdmin`. 
- -## Deployment - -### Resources - -- [cloudresourcemanager.v1.project](https://cloud.google.com/compute/docs/reference/latest/projects) -- [deploymentmanager.v2.virtual.projectBillingInfo](https://cloud.google.com/billing/reference/rest/v1/projects/updateBillingInfo) -- [iam.v1.serviceAccount](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts) -- [deploymentmanager.v2.virtual.enableService](https://cloud.google.com/service-management/reference/rest/v1/services/enable) -- [../iam_member CFT template](../iam_member/README.md) -- [gcp-types/cloudresourcemanager-v1:cloudresourcemanager.projects.setIamPolicy](https://cloud.google.com/deployment-manager/docs/configuration/supported-gcp-types) -- [gcp-types/storage-v1:buckets](https://cloud.google.com/deployment-manager/docs/configuration/supported-gcp-types) -- [gcp-types/compute-v1:compute.projects.setUsageExportBucket](https://cloud.google.com/deployment-manager/docs/configuration/supported-gcp-types) -- [compute.beta.xpnResource](https://cloud.google.com/compute/docs/reference/rest/beta/projects/enableXpnResource) -- [compute.beta.xpnHost](https://cloud.google.com/compute/docs/reference/rest/beta/projects/enableXpnHost) -- [gcp-types/compute-v1:compute.firewalls.delete](https://cloud.google.com/compute/docs/reference/rest/v1/firewalls) -- [gcp-types/compute-v1:compute.networks.delete](https://cloud.google.com/compute/docs/reference/rest/v1/networks) -- [gcp-types/iam-v1:iam.projects.serviceAccounts.delete](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts) - -### Properties - -See the `properties` section in the schema file(s): - -- [project](project.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. 
Copy the example DM config to be used as a model for the deployment; in this case, [examples/project_standalone.yaml](examples/project_standalone.yaml): - -```shell - cp templates/project/examples/project_standalone.yaml my_project.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_project.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_project.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Standalone project](examples/project_standalone.yaml) -- [VPC host project](examples/project_vpc_host.yaml) -- [VPC consumer project](examples/project_vpc_consumer.yaml) diff --git a/dm/templates/project/examples/project_standalone.yaml b/dm/templates/project/examples/project_standalone.yaml deleted file mode 100644 index 093b3feadfe..00000000000 --- a/dm/templates/project/examples/project_standalone.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Example of the project template usage. -# -# In this example, a project is created, along with some of the -# resources that an enterprise is likely to need for regular operations. -# -# Note that the `parent.type` property defaults to "organization", so it -# does not need to be specified in the config. It is specified here for -# informational purposes. -# -# See `templates/project.py.schema` for detailed information on the -# properties. 
-# -# Replace the following with valid values -# : Project name -# : Your organization ID -# : Your billing ID - -imports: - - path: templates/project/project.py - name: project.py - -resources: - - name: - type: project.py - properties: - parent: - type: organization - id: - billingAccountId: - activateApis: - - compute.googleapis.com - - deploymentmanager.googleapis.com - - pubsub.googleapis.com - serviceAccounts: - - accountId: test-sa-1 - displayName: test service account 1 - roles: - - roles/editor - - roles/viewer diff --git a/dm/templates/project/examples/project_vpc_consumer.yaml b/dm/templates/project/examples/project_vpc_consumer.yaml deleted file mode 100644 index 85f48d9d3d8..00000000000 --- a/dm/templates/project/examples/project_vpc_consumer.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Example of the project template usage. -# -# In this example, a project is created, along with some of the -# resources that an enterprise is likely to need for regular operations. -# -# Note that the `parent.type` property defaults to "organization", so it -# does not need to be specified in the config. It is specified here for -# informational purposes. -# -# See `templates/project.py.schema` for detailed information on the -# properties. 
-# -# Replace the following with valid values -# : Project name -# : Your organization ID -# : Your billing ID -# : VPC host ID -# : VPC host subnet - -imports: - - path: templates/project/project.py - name: project.py - -resources: - - name: - type: project.py - properties: - parent: - type: organization - id: - billingAccountId: - activateApis: - - compute.googleapis.com - - deploymentmanager.googleapis.com - - pubsub.googleapis.com - serviceAccounts: - - accountId: test-sa-1 - displayName: test service account 1 - roles: - - roles/editor - - roles/viewer - networkAccess: true - sharedVPC: - sharedVPCSubnets: - - subnetId: - region: us-east1 diff --git a/dm/templates/project/examples/project_vpc_host.yaml b/dm/templates/project/examples/project_vpc_host.yaml deleted file mode 100644 index c82abcb16d9..00000000000 --- a/dm/templates/project/examples/project_vpc_host.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# Example of the project template usage. -# -# In this example, a project is created, along with some of the -# resources that an enterprise is likely to need for regular operations. -# -# Note that the `parent.type` property defaults to "organization", so it -# does not need to be specified in the config. It is specified here for -# informational purposes. -# -# See `templates/project.py.schema` for detailed information on the -# properties. 
-# -# Replace the following with valid values -# : Project name -# : Your organization ID -# : Your billing ID - -imports: - - path: templates/project/project.py - name: project.py - -resources: - - name: - type: project.py - properties: - parent: - type: organization - id: - billingAccountId: - activateApis: - - compute.googleapis.com - - deploymentmanager.googleapis.com - - pubsub.googleapis.com - serviceAccounts: - - accountId: test-sa-1 - displayName: test service account 1 - roles: - - roles/editor - - roles/viewer - sharedVPCHost: true diff --git a/dm/templates/project/project.py b/dm/templates/project/project.py deleted file mode 100644 index 2301b35ac0b..00000000000 --- a/dm/templates/project/project.py +++ /dev/null @@ -1,369 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -This template creates a single project with the specified service -accounts and APIs enabled. -""" -import copy - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - project_name = properties.get('name', context.env['name']) - project_id = properties.get('projectId', project_name) - - # Ensure that the parent ID is a string. 
- properties['parent']['id'] = str(properties['parent']['id']) - - resources = [ - { - 'name': '{}-project'.format(context.env['name']), - # https://cloud.google.com/resource-manager/reference/rest/v1/projects/create - 'type': 'gcp-types/cloudresourcemanager-v1:projects', - 'properties': - { - 'name': project_name, - 'projectId': project_id, - 'parent': properties['parent'], - 'labels' : properties.get('labels', {}) - } - }, - { - 'name': '{}-billing'.format(context.env['name']), - # https://cloud.google.com/billing/reference/rest/v1/projects/updateBillingInfo - 'type': 'deploymentmanager.v2.virtual.projectBillingInfo', - 'properties': - { - 'name': - 'projects/$(ref.{}-project.projectId)'.format(context.env['name']), - 'billingAccountName': - 'billingAccounts/' + - properties['billingAccountId'] - } - } - ] - - api_resources, api_names_list = activate_apis(context) - resources.extend(api_resources) - resources.extend(create_service_accounts(context, project_id)) - - resources.extend(create_shared_vpc(context)) - - return { - 'resources': - resources, - 'outputs': - [ - { - 'name': 'projectId', - 'value': '$(ref.{}-project.projectId)'.format(context.env['name']) - }, - { - 'name': 'projectNumber', - 'value': '$(ref.{}-project.projectNumber)'.format(context.env['name']) - }, - { - 'name': 'serviceAccountDisplayName', - 'value': - '$(ref.{}-project.projectNumber)@cloudservices.gserviceaccount.com'.format(context.env['name']) # pylint: disable=line-too-long - }, - {## This is a workaround to avoid the need of string concatenation in case of referenving to this output. 
- 'name': 'containerSA', - 'value': 'serviceAccount:service-$(ref.{}-project.projectNumber)@container-engine-robot.iam.gserviceaccount.com'.format(context.env['name']) - }, - { - 'name': 'containerSADisplayName', - 'value': 'service-$(ref.{}-project.projectNumber)@container-engine-robot.iam.gserviceaccount.com'.format(context.env['name']) - }, - { - 'name': - 'resources', - 'value': - [resource['name'] for resource in resources] - } - ] - } - - -def activate_apis(context): - """ Resources for API activation. """ - - properties = context.properties - concurrent_api_activation = properties.get('concurrentApiActivation') - apis = properties.get('activateApis', []) - - if 'storage-component.googleapis.com' not in apis: - if ( - # Enable the storage-component API if the usage export bucket is enabled. - properties.get('usageExportBucket') - ): - apis.append('storage-component.googleapis.com') - - if 'compute.googleapis.com' not in apis: - if ( - properties.get('sharedVPCHost') or - properties.get('sharedVPC') or - properties.get('sharedVPCSubnets') - ): - apis.append('compute.googleapis.com') - - if 'container.googleapis.com' not in apis: - if ( - properties.get('enableGKEToUseSharedVPC') and - properties.get('sharedVPC') - ): - apis.append('container.googleapis.com') - - resources = [] - api_names_list = ['{}-billing'.format(context.env['name'])] - for api in apis: - depends_on = ['{}-billing'.format(context.env['name'])] - # Serialize activation of all APIs by making apis[n] - # depend on apis[n-1]. 
- if resources and not concurrent_api_activation: - depends_on.append(resources[-1]['name']) - - api_name = '{}-api-{}'.format(context.env['name'], api) - api_names_list.append(api_name) - resources.append( - { - 'name': api_name, - # https://cloud.google.com/service-infrastructure/docs/service-management/reference/rest/v1/services/enable - 'type': 'gcp-types/servicemanagement-v1:servicemanagement.services.enable', - 'metadata': { - 'dependsOn': depends_on - }, - 'properties': - { - 'consumerId': 'project:$(ref.{}-project.projectId)'.format(context.env['name']), - 'serviceName': api - } - } - ) - - # Return the API resources to enable other resources to use them as - # dependencies, to ensure that they are created first. For example, - # the default VPC or service account. - return resources, api_names_list - - -def create_project_iam(context, dependencies, role_member_list): - """ Grant the shared project IAM permissions. """ - - resources = [ - { - # Get the IAM policy first, so as not to remove - # any existing bindings. - 'name': '{}-project-iam-policy'.format(context.env['name']), - 'type': 'cft-iam_project_member.py', - 'properties': { - 'projectId': '$(ref.{}-project.projectId)'.format(context.env['name']), - 'roles': role_member_list, - 'dependsOn': dependencies, - }, - 'metadata': - { - 'dependsOn': dependencies, - 'runtimePolicy': ['UPDATE_ALWAYS'] - } - } - ] - - return resources - - -def create_shared_vpc_subnet_iam(context, dependencies, members_list): - """ Grant the shared VPC subnet IAM permissions to Service Accounts. """ - - resources = [] - - # Grant the Service Accounts access to the shared VPC subnets. - # Note that, until there is a subnetwork IAM patch support, - # setIamPolicy will overwrite any existing policies on the subnet. 
- for i, subnet in enumerate( - context.properties.get('sharedVPCSubnets'), 1 - ): - resources.append( - { - 'name': '{}-add-vpc-subnet-iam-policy-{}'.format(context.env['name'], i), - # https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks/setIamPolicy - 'type': 'gcp-types/compute-v1:compute.subnetworks.setIamPolicy', # pylint: disable=line-too-long - 'metadata': - { - 'dependsOn': dependencies, - }, - 'properties': - { - 'name': subnet['subnetId'], - 'project': context.properties['sharedVPC'], - 'region': subnet['region'], - 'policy' : { - 'bindings': [ - { - 'role': 'roles/compute.networkUser', - 'members': members_list, - } - ], - }, - } - } - ) - - return resources - - -def create_service_accounts(context, project_id): - """ Create Service Accounts and grant project IAM permissions. """ - - resources = [] - network_list = [ - 'serviceAccount:$(ref.{}-project.projectNumber)@cloudservices.gserviceaccount.com'.format(context.env['name']) - ] - service_account_dep = [] - - if context.properties.get('enableGKEToUseSharedVPC') and context.properties.get('sharedVPC'): - network_list.append( - 'serviceAccount:service-$(ref.{}-project.projectNumber)@container-engine-robot.iam.gserviceaccount.com'.format(context.env['name']) - ) - service_account_dep.append("{}-api-container.googleapis.com".format(context.env['name'])) - - policies_to_add = [] - - for service_account in context.properties['serviceAccounts']: - account_id = service_account['accountId'] - display_name = service_account.get('displayName', account_id) - - # Build a list of SA resources to be used as a dependency - # for permission granting. - name = '{}-service-account-{}'.format(context.env['name'], account_id) - service_account_dep.append(name) - - sa_name = 'serviceAccount:{}@{}.iam.gserviceaccount.com'.format( - account_id, - project_id - ) - - # Check if the member needs shared VPC permissions. Put in - # a list to grant the shared VPC subnet IAM permissions. 
- if service_account.get('networkAccess'): - network_list.append(sa_name) - - # Build the service account bindings for the project IAM permissions. - for role in service_account['roles']: - policies_to_add.append({'role': role, 'members': [sa_name]}) - - # Create the service account resource. - resources.append( - { - 'name': name, - # https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/create - 'type': 'gcp-types/iam-v1:projects.serviceAccounts', - 'properties': - { - 'accountId': account_id, - 'displayName': display_name, - 'name': 'projects/$(ref.{}-project.projectId)'.format(context.env['name']) - } - # There is a bug in gcp type for IAM that ignores "name" field - } if False else { - 'name': name, - 'type': 'iam.v1.serviceAccount', - 'properties': - { - 'accountId': account_id, - 'displayName': display_name, - 'projectId': '$(ref.{}-project.projectId)'.format(context.env['name']) - } - } - ) - - # Build the group bindings for the project IAM permissions. - for group in context.properties['groups']: - group_name = 'group:{}'.format(group['name']) - for role in group['roles']: - policies_to_add.append({'role': role, 'members': [group_name]}) - - # Check if the group needs shared VPC permissions. Put in - # a list to grant the shared VPC subnet IAM permissions. - if group.get('networkAccess'): - network_list.append(group_name) - - # Create the project IAM permissions. - if policies_to_add: - iam = create_project_iam(context, service_account_dep, policies_to_add) - resources.extend(iam) - - if ( - not context.properties.get('sharedVPCHost') and - context.properties.get('sharedVPCSubnets') and - context.properties.get('sharedVPC') - ): - # Create the shared VPC subnet IAM permissions. 
- service_account_dep.append("{}-api-compute.googleapis.com".format(context.env['name'])) - resources.extend( - create_shared_vpc_subnet_iam( - context, - service_account_dep, - network_list - ) - ) - - return resources - - -def create_shared_vpc(context): - """ Configure the project Shared VPC properties. """ - - resources = [] - - properties = context.properties - service_project = properties.get('sharedVPC') - if service_project: - resources.append( - { - 'name': '{}-attach-xpn-service-{}'.format(context.env['name'], service_project), - # https://cloud.google.com/compute/docs/reference/rest/beta/projects/enableXpnResource - 'type': 'compute.beta.xpnResource', - 'metadata': { - 'dependsOn': ['{}-api-compute.googleapis.com'.format(context.env['name'])] - }, - 'properties': - { - 'project': service_project, - 'xpnResource': - { - 'id': '$(ref.{}-project.projectId)'.format(context.env['name']), - 'type': 'PROJECT', - } - } - } - ) - elif properties.get('sharedVPCHost'): - resources.append( - { - 'name': '{}-xpn-host'.format(context.env['name']), - # https://cloud.google.com/compute/docs/reference/rest/beta/projects/enableXpnHost - 'type': 'compute.beta.xpnHost', - 'metadata': { - 'dependsOn': ['{}-api-compute.googleapis.com'.format(context.env['name'])] - }, - 'properties': { - 'project': '$(ref.{}-project.projectId)'.format(context.env['name']) - } - } - ) - - return resources diff --git a/dm/templates/project/project.py.schema b/dm/templates/project/project.py.schema deleted file mode 100644 index a246fd13c53..00000000000 --- a/dm/templates/project/project.py.schema +++ /dev/null @@ -1,353 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Project - author: Sourced Group Inc. - version: 1.2.5 - description: | - Supports creation of a single project. The project is created with a - billing account attached, permissions altered, APIs activated, and - service accounts created. - - For more information on this resource: - https://cloud.google.com/resource-manager/ - - APIs endpoints used by this template: - - gcp-types/cloudresourcemanager-v1:projects => - https://cloud.google.com/resource-manager/reference/rest/v1/projects/create - - deploymentmanager.v2.virtual.projectBillingInfo => - https://cloud.google.com/billing/reference/rest/v1/projects/updateBillingInfo - - gcp-types/servicemanagement-v1:servicemanagement.services.enable => - https://cloud.google.com/service-infrastructure/docs/service-management/reference/rest/v1/services/enable - - gcp-types/compute-v1:compute.subnetworks.setIamPolicy => - https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks/setIamPolicy - - compute.beta.xpnHost => - https://cloud.google.com/compute/docs/reference/rest/beta/projects/enableXpnHost - - compute.beta.xpnResource => - https://cloud.google.com/compute/docs/reference/rest/beta/projects/enableXpnResource - - gcp-types/compute-v1:compute.networks.delete => - https://cloud.google.com/compute/docs/reference/rest/v1/networks/delete - - gcp-types/compute-v1:compute.firewalls.delete => - https://cloud.google.com/compute/docs/reference/rest/v1/firewalls/delete - - gcp-types/iam-v1:iam.projects.serviceAccounts.delete => - 
https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/delete - - gcp-types/compute-v1:compute.projects.setUsageExportBucket => - https://cloud.google.com/compute/docs/reference/rest/v1/projects/setUsageExportBucket - - gcp-types/storage-v1:buckets => - https://cloud.google.com/storage/docs/json_api/v1/buckets/insert - - gcp-types/iam-v1:projects.serviceAccounts => - https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/create - -imports: -- path: ../iam_member/iam_member.py - name: cft-iam_project_member.py - -additionalProperties: false - -required: - - billingAccountId - -oneOf: - - allOf: - - required: - - sharedVPCHost - - not: - required: - - sharedVPC - - not: - required: - - sharedVPCSubnets - - required: - - sharedVPC - - sharedVPCSubnets - not: - required: - - sharedVPCHost - - allOf: - - not: - required: - - sharedVPC - - not: - required: - - sharedVPCHost - - not: - required: - - sharedVPCSubnets - -dependencies: - sharedVPCSubnets: - required: - - sharedVPC - sharedVPC: - required: - - sharedVPCSubnets - -allOf: - - $ref: '#/definitions/networkAccess-requires-sharedVPCSubnets' - -definitions: - usageExportBucket-enabled: - type: boolean - description: | - Defines whether a usage export bucket must be created. - False by default so as to not inadvertently incur - costs to the user. It is strongly suggested to be enabled - (set to True). 
- networkAccess-requires-sharedVPCSubnets: - oneOf: - - $ref: '#/definitions/no-sharedVPCSubnets' - - $ref: '#/definitions/sharedVPCSubnets' - no-sharedVPCSubnets: - allOf: - - not: - required: - - sharedVPCSubnets - - properties: - serviceAccounts: - items: - properties: - networkAccess: - enum: - - False - groups: - items: - properties: - networkAccess: - enum: - - False - sharedVPCSubnets: - allOf: - - required: - - sharedVPCSubnets - - properties: - serviceAccounts: - items: - properties: - networkAccess: - enum: - - True - - False - groups: - items: - properties: - networkAccess: - enum: - - True - - False - -properties: - name: - type: string - description: | - The project name. If provided, configures the project to have a - human-readable name that is different from the project ID. - projectId: - type: string - pattern: ^[a-z][a-z0-9-]{5,28}[a-z0-9]$ - description: | - The unique, user-assigned ID of the Project. It must be 6 to 30 lowercase - letters, digits, or hyphens. It must start with a letter. Trailing - hyphens are prohibited. Read-only after creation. - Example: tokyo-rain-123 - parent: - type: object - additionalProperties: false - description: The parent of the project. - required: - - type - - id - properties: - type: - type: string - decription: The parent type (organization or folder). - enum: - - organization - - folder - default: organization - id: - type: [integer, string] - description: | - The ID of the project's parent. - pattern: ^[0-9]{8,25}$ - labels: - type: object - minProperties: 0 - maxProperties: 64 - additionalProperties: false - patternProperties: - ^[a-z](?:[-_a-z0-9]{0,61}[a-z0-9])?$ : - type: string - pattern: ^[a-z0-9]?(?:[-_a-z0-9]{0,61}[a-z0-9])?$ - description: | - Map of labels associated with this Project up to a maximum of 64. Keys have a minimum length of 1 - character and a maximum length of 63 characters, and cannot be empty. Values can be empty, and have - a maximum length of 63 characters. 
Keys and values can contain only lowercase letters, numeric - characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters - are allowed. - Example: - name: my-name - mass_not_eq_to_0: 1_3kg - count-next: '3' - empty-value: '' - billingAccountId: - type: string - description: | - The ID of the billing account to attach to the projects. - For example, 00E12A-0AB8B2-078CE8 - activateApis: - type: array - uniqueItems: True - items: - type: string - description: The list of APIs to enable for each project. - serviceAccounts: - type: array - uniqueItems: True - default: [] - items: - type: object - required: - - accountId - properties: - accountId: - type: string - pattern: ^[a-z]([-a-z0-9]{0,61}[a-z0-9])?$ - description: The name used to create the service account. - displayName: - type: string - description: | - The name to display for the service account. If not set, `accountId` - is used as the display name. - roles: - type: array - items: - type: string - description: The list of roles to grant the service account. - networkAccess: - type: boolean - default: False - description: | - If True, grants the shared VPC subnet IAM permissions - to the service account for the subnet specified by the - `sharedVPCSubnets.subnetId` field. - This field must not be set if `sharedVPCHost` is True. - groups: - type: array - uniqueItems: True - default: [] - items: - type: object - required: - - name - - roles - properties: - name: - type: string - description: The name of the Google group. - roles: - type: array - minItems: 1 - items: - type: string - description: The list of roles to grant the Google group. - networkAccess: - type: boolean - default: False - description: | - If True, grants the shared VPC subnet IAM permissions - to the group for the subnet specified by the - `sharedVPCSubnets.subnetId` field. - This field must not be set if `sharedVPCHost` is True. 
- concurrentApiActivation: - type: boolean - default: False - description: | - If True, activates all the requested APIs concurrently. - When set to False, the APIs are activated serially. - Concurrent activation makes for faster deployment but could potentially - fail the deployment exceeding the quota limits. Make sure to request - adequate quota before using this option. Serial activation tends to be - significantly slower. - sharedVPC: - type: string - description: | - Name of the Shared VPC Host Project that this project will participate - in. The `sharedVPCHost` property cannot be set if this property is set. - sharedVPCSubnets: - type: array - uniqueItems: True - description: | - The IDs of specific shared VPC subnets to share in the new project. - minItems: 1 - items: - type: object - required: - - subnetId - - region - properties: - subnetId: - type: string - description: | - The ID of the subnet to set IAM policies on. For example, - `test-subnetwork-1`.The name of the Google group. - region: - type: string - description: The region of the subnet. - sharedVPCHost: - type: boolean - description: | - If set to True, indicates that this project is to be used as a - host project for Shared VPC networks. - The `sharedVPC` and `sharedVPCSubnets` properties cannot be set if this - property is set. - enableGKEToUseSharedVPC: - type: boolean - default: False - description: | - If this project is a SharedVPC Guest project the - *service-PROJECTNUMBER@container-engine-robot.iam.gserviceaccount.com* - ServiceAccount needs to be added to the shared subnets with the - roles/compute.networkUser IAM binding. - If true, *container.googleapis.com* will be enabled. - If this project is *NOT* a guest project, this value is ignored. - -outputs: - projectId: - type: string - description: The unique, user-assigned ID of the Project. - projectNumber: - type: string - description: The number uniquely identifying the project. 
- containerSA: - type: string - description: The built-in ServieAccount name for container services. (With 'śerviceAccount:' prefix.) ( Only exists if container.googleapis.com is enabled.) - containerSADisplayName: - type: string - description: The built-in ServieAccount name for container services. ( Only exists if container.googleapis.com is enabled.) - serviceAccountDisplayName: - type: string - description: Name of the default service account for the project. - resources: - type: array - description: | - Names of the resources the template creates. This output can be used - by other templates for explicit waiting for all project configuration - steps to finish. - -documentation: - - templates/project/README.md - -examples: - - templates/project/examples/project.yaml diff --git a/dm/templates/project/tests/integration/project.bats b/dm/templates/project/tests/integration/project.bats deleted file mode 100644 index f8e5c1d11ed..00000000000 --- a/dm/templates/project/tests/integration/project.bats +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - export CLOUD_FOUNDATION_PROJECT_ID=$(echo ${CLOUD_FOUNDATION_PROJECT_ID} | cut -c 1-10) - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/project/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - - -########## TESTS ########## - -@test "Deploying project $DEPLOYMENT_NAME" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" --config "${CONFIG}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] -} - -@test "Verifying that project $CLOUD_FOUNDATION_PROJECT_ID was created" { - run gcloud projects list - - echo "Status: $status" - echo "Output: $output" - - [[ "$output" =~ "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" ]] -} - -@test "Verifying that APIs were activated for project ${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" { - run gcloud services list --project "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$output" =~ "compute.googleapis.com" ]] - [[ "$output" =~ "deploymentmanager.googleapis.com" ]] - [[ "$output" =~ "pubsub.googleapis.com" ]] - - # ensure storage API is enabled when usageExportBucket is true - [[ "$output" =~ "storage-component.googleapis.com" ]] -} - -@test "Verifying that usage report export to the bucket was created for project ${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" { - run gcloud compute project-info describe --project "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" \ - 
--format="flattened[no-pad](usageExportLocation)" - - echo "Status: $status" - echo "Output: $output" - - [[ "$output" =~ "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}-usage-export" ]] -} - -@test "Verifying that the project is a shared vpc host project for project ${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" { - run gcloud compute shared-vpc organizations list-host-projects "${CLOUD_FOUNDATION_ORGANIZATION_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$output" =~ "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" ]] -} - -@test "Verifying that the default VPC was deleted for project ${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" { - run gcloud compute networks list --project "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ ! "$output" =~ "default" ]] -} - -@test "Verifying that the default Compute Engine SA was removed for project ${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" { - run gcloud iam service-accounts list --project "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" - - echo "Status: $status" - echo "Output: $output" - - [[ ! "$output" =~ "Compute Engine default service account" ]] -} - -@test "Verifying that the service accounts were granted project IAM permissions for project ${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" { - run gcloud projects get-iam-policy "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" \ - --flatten="bindings[].members" \ - --format='table(bindings.role)' \ - --filter="bindings.members:sa-${RAND}@${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}.iam.gserviceaccount.com" - - echo "Status: $status" - echo "Output: $output" - - [[ "$output" =~ "roles/editor" ]] - [[ "$output" =~ "roles/viewer" ]] -} - -@test "Deleting deployment" { - ## TODO project creation should work without disabling XPN hosts. 
- - run gcloud alpha resource-manager liens list --project "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" - - echo "Status: $status" - echo "Output: $output" - [[ "$status" -eq 0 ]] - - run gcloud compute shared-vpc disable "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" - - echo "Status: $status" - echo "Output: $output" - [[ "$status" -eq 0 ]] - - run gcloud alpha resource-manager liens list --project "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" - - echo "Status: $status" - echo "Output: $output" - [[ "$status" -eq 0 ]] - - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q - - echo "Status: $status" - echo "Output: $output" - [[ "$status" -eq 0 ]] - - run gcloud projects list - [[ ! "$output" =~ "${CLOUD_FOUNDATION_PROJECT_ID}-${RAND}" ]] -} diff --git a/dm/templates/project/tests/integration/project.yaml b/dm/templates/project/tests/integration/project.yaml deleted file mode 100644 index 6d8cab34243..00000000000 --- a/dm/templates/project/tests/integration/project.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Test of the project template. -# -# Variables: -# RAND: a random string used by the testing suite. 
- -imports: - - path: templates/project/project.py - name: project.py - -resources: - - name: ${CLOUD_FOUNDATION_PROJECT_ID}-${RAND} - type: project.py - properties: - name: ${CLOUD_FOUNDATION_PROJECT_ID}-${RAND} - projectId: ${CLOUD_FOUNDATION_PROJECT_ID}-${RAND} - parent: - id: ${CLOUD_FOUNDATION_ORGANIZATION_ID} - billingAccountId: ${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID} - activateApis: - - compute.googleapis.com - - deploymentmanager.googleapis.com - - pubsub.googleapis.com - serviceAccounts: - - accountId: sa-${RAND} - roles: - - roles/editor - - roles/viewer - sharedVPCHost: True diff --git a/dm/templates/project/tests/schemas/invalid_project.yaml b/dm/templates/project/tests/schemas/invalid_project.yaml deleted file mode 100644 index 4e844186ac1..00000000000 --- a/dm/templates/project/tests/schemas/invalid_project.yaml +++ /dev/null @@ -1,24 +0,0 @@ -parent: - type: folder - id: FOOBAR -billingAccountId: FOOBAR -# activateApis: -# - compute.googleapis.com -serviceAccounts: - - accountId: test-sa-1 - displayName: test service account 1 - roles: - - roles/editor - - roles/viewer - networkAccess: true -groups: - - name: test-group - roles: - - roles/editor - - roles/viewer - networkAccess: false -sharedVPCHost: true -# sharedVPC: FOOBAR -# sharedVPCSubnets: -# - subnetId: default -# region: europe-west1 diff --git a/dm/templates/pubsub/README.md b/dm/templates/pubsub/README.md deleted file mode 100644 index 4d108516f23..00000000000 --- a/dm/templates/pubsub/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# Pubsub - -This template creates a Pub/Sub (publish-subscribe) service. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [pubsub.admin](https://cloud.google.com/pubsub/docs/access-control) -IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [pubsub.v1.topic](https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics) -- [pubsub.v1.subscription](https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions) - -### Properties - -See the `properties` section in the schema file(s): - -- [Pub/Sub](pubsub.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/pubsub.yaml](examples/pubsub.yaml): - -```shell - cp templates/pubsub/examples/pubsub.yaml my_pubsub.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_pubsub.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_pubsub.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Pub/Sub](examples/pubsub.yaml) -- [Pub/Sub with PUSH subscription](examples/pubsub_push.yaml) diff --git a/dm/templates/pubsub/examples/pubsub.yaml b/dm/templates/pubsub/examples/pubsub.yaml deleted file mode 100644 index 36e72a21a55..00000000000 --- a/dm/templates/pubsub/examples/pubsub.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Example of the PubSub template usage. -# -# In this example, a 'test-topic' is created with two PULL subscriptions. -# IAM policies are assigned to the topic and to one of the subscriptions. - -imports: - - path: templates/pubsub/pubsub.py - name: pubsub.py - -resources: - - name: test-pubsub - type: pubsub.py - properties: - name: test-topic - accessControl: - - role: roles/pubsub.subscriber - members: - - user:demo@user.com - subscriptions: - - name: first-subscription - - name: second-subscription - ackDeadlineSeconds: 15 diff --git a/dm/templates/pubsub/examples/pubsub_push.yaml b/dm/templates/pubsub/examples/pubsub_push.yaml deleted file mode 100644 index 576b968e78f..00000000000 --- a/dm/templates/pubsub/examples/pubsub_push.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Example on how to use the Pub/Sub template -# -# In this example, a 'test-topic' is created with one PUSH subscription. -# Replace the following placeholders with valid values: -# : a URL where the messages will be pushed to -# -imports: - - path: templates/pubsub/pubsub.py - name: pubsub.py - -resources: - - name: test-push-pubsub - type: pubsub.py - properties: - name: test-topic - subscriptions: - - name: push-subscription - pushEndpoint: diff --git a/dm/templates/pubsub/pubsub.py b/dm/templates/pubsub/pubsub.py deleted file mode 100644 index f496a32b36e..00000000000 --- a/dm/templates/pubsub/pubsub.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This template creates a Pub/Sub (publish-subscribe) service. """

from hashlib import sha1
import json


def set_optional_property(destination, source, prop_name):
    """ Copies the property value if present. """
    if prop_name in source:
        destination[prop_name] = source[prop_name]


def create_subscription(resource_name, project_id, spec):
    """ Builds a pull/push subscription resource from a simplified spec.

    Args:
        resource_name: the DM resource name of the topic the subscription
            attaches to.
        project_id: the project that hosts the Pub/Sub resources.
        spec: a single entry of the template's `subscriptions` property.

    Returns:
        A one-element list containing the subscription resource, ready to
        be concatenated onto the caller's resource list.
    """
    # Derive a stable, spec-dependent suffix so that each subscription of
    # the same topic gets a unique DM resource name.
    digest = sha1((resource_name + json.dumps(spec)).encode('utf-8'))
    suffix = 'subscription-{}'.format(digest.hexdigest()[:10])
    subscription_name = spec.get('name', suffix)

    subscription_properties = {
        'subscription': subscription_name,
        'name': 'projects/{}/subscriptions/{}'.format(project_id,
                                                      subscription_name),
        # Reference the topic resource so DM creates the topic first.
        'topic': '$(ref.{}.name)'.format(resource_name),
    }

    # Forward the optional, API-supported subscription settings verbatim.
    for prop in ('labels',
                 'pushConfig',
                 'ackDeadlineSeconds',
                 'retainAckedMessages',
                 'messageRetentionDuration',
                 'expirationPolicy'):
        set_optional_property(subscription_properties, spec, prop)

    # The `pushEndpoint` shorthand takes precedence over an explicit
    # `pushConfig` (the schema's oneOf makes them mutually exclusive).
    if spec.get('pushEndpoint') is not None:
        subscription_properties['pushConfig'] = {
            'pushEndpoint': spec['pushEndpoint'],
        }

    return [
        {
            'name': '{}-{}'.format(resource_name, suffix),
            # https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions
            'type': 'gcp-types/pubsub-v1:projects.subscriptions',
            'properties': subscription_properties,
        }
    ]


def generate_config(context):
    """ Entry point for the deployment resources. """
    properties = context.properties
    # Topic-name precedence: `name`, then the deprecated `topic`, then the
    # DM resource name itself.
    topic_name = properties.get('name',
                                properties.get('topic', context.env['name']))
    project_id = properties.get('project', context.env['project'])

    topic = {
        'name': context.env['name'],
        # https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics
        'type': 'gcp-types/pubsub-v1:projects.topics',
        'properties': {
            'topic': topic_name,
            'name': 'projects/{}/topics/{}'.format(project_id, topic_name),
        },
    }
    # `labels` is the only optional topic property the API accepts here.
    set_optional_property(topic['properties'], properties, 'labels')

    resources = [topic]
    for spec in properties.get('subscriptions', []):
        resources += create_subscription(context.env['name'],
                                         project_id,
                                         spec)

    return {
        'resources': resources,
        'outputs': [
            {
                'name': 'topicName',
                'value': '$(ref.{}.name)'.format(context.env['name']),
            },
        ],
    }
- description: | - Creates a topic, optionally with multiple subscriptions. - - For more information on this resource: - - https://cloud.google.com/pubsub/ - - API endpoints used by this template: - - gcp-types/pubsub-v1:projects.subscriptions => - https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions - - gcp-types/pubsub-v1:projects.topics => - https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics - -additionalProperties: false - -oneOf: - - required: - - name - - required: - - topic - -properties: - name: - type: string - description: | - The name of the topic that will publish messages. Resource name would be used if omitted. - topic: - type: string - description: | - The name of the topic that will publish messages. If not specified, - the deployment name is used. - DEPRECATED. - project: - type: string - description: | - The project ID of the project containing PubSub resources. The - Google apps domain is prefixed if applicable. - labels: - type: object - description: | - An object containing a list of "key": value pairs. - - Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - subscriptions: - type: array - uniqueItems: True - description: | - A list of topic's subscriptions. - items: - type: object - additionalProperties: false - description: | - The topic's subscription. - oneOf: - - required: - - pushEndpoint - - required: - - pushConfig - properties: - name: - type: string - description: | - The subscription name. Resource name would be used if omitted. - pushEndpoint: - type: string - description: | - The URL of the endpoint to push the messages to. - pushConfig: - type: object - additionalProperties: false - description: | - If push delivery is used with this subscription, this field is used to configure it. - An empty pushConfig signifies that the subscriber will pull and ack messages using API methods.
- required: - - pushEndpoint - properties: - pushEndpoint: - type: string - description: | - A URL locating the endpoint to which messages should be pushed. - For example, a Webhook endpoint might use "https://example.com/push". - oidcToken: - type: object - description: | - If specified, Pub/Sub will generate and attach an OIDC JWT token as an Authorization header - in the HTTP request for every pushed message. - properties: - serviceAccountEmail: - type: string - description: | - Service account email to be used for generating the OIDC token. The caller - (for subscriptions.create, subscriptions.patch, and subscriptions.modifyPushConfig RPCs) - must have the iam.serviceAccounts.actAs permission for the service account. - audience: - type: string - description: | - Audience to be used when generating OIDC token. The audience claim identifies the recipients - that the JWT is intended for. The audience value is a single case-sensitive string. - Having multiple values (array) for the audience field is not supported. - More info about the OIDC JWT token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 - Note: if not specified, the Push endpoint URL will be used. - attributes: - type: object - description: | - Endpoint configuration attributes. - - Every endpoint has a set of API supported attributes that can be used to control different - aspects of the message delivery. - - The currently supported attribute is x-goog-version, which you can use to change the format - of the pushed message. This attribute indicates the version of the data expected by the endpoint. - This controls the shape of the pushed message (i.e., its fields and metadata). - The endpoint version is based on the version of the Pub/Sub API. - - If not present during the subscriptions.create call, it will default to the version of the - API used to make such call. If not present during a subscriptions.modifyPushConfig call, - its value will not be changed. 
subscriptions.get calls will always return a valid version, - even if the subscription was created without this attribute. - - The possible values for this attribute are: - - v1beta1: uses the push format defined in the v1beta1 Pub/Sub API. - v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API. - An object containing a list of "key": value pairs. - Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - ackDeadlineSeconds: - type: integer - description: | - The approximate amount of time (on a best-effort basis) Pub/Sub waits for the subscriber to acknowledge - receipt before resending the message. In the interval after the message is delivered and - before it is acknowledged, it is considered to be outstanding. - During that time period, the message will not be redelivered (on a best-effort basis). - - For pull subscriptions, this value is used as the initial value for the ack deadline. To override this - value for a given message, call subscriptions.modifyAckDeadline with the corresponding ackId if using - non-streaming pull or send the ackId in a StreamingModifyAckDeadlineRequest if using streaming pull. - The minimum custom deadline you can specify is 10 seconds. The maximum custom deadline you can specify - is 600 seconds (10 minutes). If this parameter is 0, a default value of 10 seconds is used. - - For push delivery, this value is also used to set the request timeout for the call to the push endpoint. - - If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message. - minimum: 0 - maximum: 600 - retainAckedMessages: - type: boolean - description: | - Indicates whether to retain acknowledged messages. If true, then messages are not expunged from the - subscription's backlog, even if they are acknowledged, until they fall out of the - messageRetentionDuration window. This must be true if you would like to subscriptions.seek to a timestamp.
- messageRetentionDuration: - type: string - description: | - How long to retain unacknowledged messages in the subscription's backlog, from the moment a message - is published. If retainAckedMessages is true, then this also configures the retention of - acknowledged messages, and thus configures how far back in time a subscriptions.seek can be done. - Defaults to 7 days. Cannot be more than 7 days or less than 10 minutes. - - A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". - expirationPolicy: - type: object - description: | - A policy that specifies the conditions for this subscription's expiration. A subscription is - considered active as long as any connected subscriber is successfully consuming messages from - the subscription or is issuing operations on the subscription. If expirationPolicy is not set, - a default policy with ttl of 31 days will be used. The minimum allowed value - for expirationPolicy.ttl is 1 day. - required: - - ttl - properties: - ttl: - type: string - description: | - Specifies the "time-to-live" duration for an associated resource. The resource expires if it is - not active for a period of ttl. The definition of "activity" depends on the type of - the associated resource. The minimum and maximum allowed values for ttl depend on the type - of the associated resource, as well. If ttl is not set, the associated resource never expires. - - A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". - -outputs: - topicName: - type: string - description: The created topic's name. 
- -documentation: - - templates/pubsub/README.md - -examples: - - templates/pubsub/examples/pubsub.yaml - - templates/pubsub/examples/pubsub_push.yaml diff --git a/dm/templates/pubsub/tests/integration/pubsub.bats b/dm/templates/pubsub/tests/integration/pubsub.bats deleted file mode 100755 index 2956a5e44e5..00000000000 --- a/dm/templates/pubsub/tests/integration/pubsub.bats +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/pubsub/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that test-topic-${RAND} was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud pubsub topics list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "test-topic-${RAND}" ]] -} - -@test "Verifying that test-topic-${RAND}'s IAM policy was set" { - run gcloud beta pubsub topics get-iam-policy test-topic-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "demo@user.com" ]] -} - -@test "Verifying that two subscriptions were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud pubsub subscriptions list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "first-subscription-${RAND}" ]] - [[ "$output" =~ "second-subscription-${RAND}" ]] -} - -@test "Verifying that first-subscription-${RAND}'s topic is test-topic-${RAND}" { - run gcloud pubsub subscriptions describe first-subscription-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "test-topic-${RAND}" ]] -} - -@test "Verifying that first-subscription-${RAND}'s IAM policy was set" { - run gcloud beta pubsub subscriptions get-iam-policy first-subscription-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "demo@user.com" ]] -} - -@test "Verifying that second-subscription-${RAND}'s topic is test-topic-${RAND}" { - run gcloud pubsub subscriptions describe second-subscription-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "test-topic-${RAND}" ]] -} - -@test "Verifying that second-subscription-${RAND}'s ackDeadlineSeconds was set" { - run gcloud pubsub subscriptions describe second-subscription-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "ackDeadlineSeconds: 15" ]] -} - -@test "Verifying that second-subscription-${RAND}'s expiration policy was set" 
{ - run gcloud pubsub subscriptions describe second-subscription-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$output" =~ "expirationPolicy:" ]] - [[ "$output" =~ "ttl: 86400s" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - run gcloud pubsub topics list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-topic-${RAND}" ]] - - run gcloud pubsub subscriptions list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "first-subscription-${RAND}" ]] - [[ ! "$output" =~ "second-subscription-${RAND}" ]] -} diff --git a/dm/templates/pubsub/tests/integration/pubsub.yaml b/dm/templates/pubsub/tests/integration/pubsub.yaml deleted file mode 100644 index 3d5bb29ba13..00000000000 --- a/dm/templates/pubsub/tests/integration/pubsub.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Test of the PubSub template. -# -# Variables: -# RAND: A random string used by the testing suite. - -imports: - - path: templates/pubsub/pubsub.py - name: pubsub.py - -resources: - - name: test-pubsub-${RAND} - type: pubsub.py - properties: - name: test-topic-${RAND} - accessControl: - - role: roles/pubsub.subscriber - members: - - user:demo@user.com - subscriptions: - - name: first-subscription-${RAND} - - name: second-subscription-${RAND} - ackDeadlineSeconds: 15 - expirationPolicy: - ttl: 86400s diff --git a/dm/templates/resource_policy/README.md b/dm/templates/resource_policy/README.md deleted file mode 100644 index 4b73514c69d..00000000000 --- a/dm/templates/resource_policy/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# Resource Policy - -This template creates a resource policy. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Enable the [Compute Engine API](https://cloud.google.com/compute/docs/reference/rest/v1/) -- Make sure that the [Google APIs service account](https://cloud.google.com/deployment-manager/docs/access-control#access_control_for_deployment_manager) has *compute.resourcePolicies.create* permissions - -## Deployment - -### Resources - -- [gcp-types/compute-v1:resourcePolicies](https://cloud.google.com/compute/docs/reference/rest/v1/resourcePolicies/insert) - -### Properties - -See the `properties` section in the schema file(s): -- [Resource Policy](resource_policy.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd cloud-foundation-toolkit/dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/resource\_policy.yaml](examples/resource_policy.yaml): - -```shell - cp templates/resource_policy/examples/resource_policy.yaml my_resource_policy.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_resource_policy.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_resource_policy.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Resource Policy](examples/resource_policy.yaml) diff --git a/dm/templates/resource_policy/examples/resource_policy.yaml b/dm/templates/resource_policy/examples/resource_policy.yaml deleted file mode 100644 index 57a2a68cac3..00000000000 --- a/dm/templates/resource_policy/examples/resource_policy.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Example of the Resource Policy template usage. -# -# In this example, a Resource Policy is created from the existing source. -# -# Replace the following placeholders with valid values: -# : a region where the resource resides -# : a resource to apply resource policy - -imports: - - path: templates/resource_policy/resource_policy.py - name: resource_policy.py - -resources: - - name: test-resourcepolicy - type: resource_policy.py - properties: - region: - resource: - description: | - Test Resource Policy - snapshotSchedulePolicy: - schedule: - weeklySchedule: - dayOfWeeks: - - day: "MONDAY" - startTime: "00:00" diff --git a/dm/templates/resource_policy/resource_policy.py b/dm/templates/resource_policy/resource_policy.py deleted file mode 100644 index 68d9fb9c83e..00000000000 --- a/dm/templates/resource_policy/resource_policy.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2019 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Resource Policy. 
""" - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - resources = [] - properties = context.properties - project_id = properties.get('project', context.env['project']) - name = properties.get('name', context.env['name']) - region = properties['region'] - resource_name = properties['resource'] - policy = properties['snapshotSchedulePolicy'] - - resource = { - 'name': name, - # https://cloud.google.com/compute/docs/reference/rest/v1/resourcePolicies/insert - 'type': 'gcp-types/compute-v1:resourcePolicies', - 'properties': { - 'project': project_id, - 'name': resource_name, - 'region': region, - 'snapshotSchedulePolicy': policy - } - } - - resources.append(resource) - - return { - 'resources': - resources, - 'outputs': - [ - { - 'name': 'name', - 'value': name - } - ] - } diff --git a/dm/templates/resource_policy/resource_policy.py.schema b/dm/templates/resource_policy/resource_policy.py.schema deleted file mode 100644 index cc1c5dfb51f..00000000000 --- a/dm/templates/resource_policy/resource_policy.py.schema +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright 2019 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Resource Policy - author: Sourced Group Inc. - version: 1.0.0 - description: | - Supports creation of a Resource Policy. - - For more information on this resource: - https://cloud.google.com/compute/docs/reference/rest/v1/resourcePolicies. 
- - APIs endpoints used by this template: - - gcp-types/compute-v1:resourcePolicies => - https://cloud.google.com/compute/docs/reference/rest/v1/resourcePolicies/insert - -additionalProperties: false - -required: - - snapshotSchedulePolicy - - resource - - region - -properties: - description: - type: string - resource: - description: | - The name of the resource, provided by the client when initially - creating the resource. The resource name must be 1-63 characters long, - and comply with RFC1035. Specifically, the name must be 1-63 characters - long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which - means the first character must be a lowercase letter, and all following - characters must be a dash, lowercase letter, or digit, except the last - character, which cannot be a dash. - pattern: '[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?' - type: string - project: - type: string - description: | - The project ID of the project to create Resource Policy. - region: - description: | - Name of the region for this request. - pattern: '[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?' - type: string - snapshotSchedulePolicy: - $ref: '#/schemas/ResourcePolicySnapshotSchedulePolicy' - description: | - Resource policy for persistent disks for creating snapshots. - -schemas: - ResourcePolicyDailyCycle: - description: | - Time window specified for daily operations. - id: '#ResourcePolicyDailyCycle' - properties: - daysInCycle: - description: Defines a schedule that runs every nth day of the month. - format: int32 - type: integer - startTime: - description: | - Start time of the window. This must be in UTC format that - resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For - example, both 13:00-5 and 08:00 are valid. - type: string - type: object - ResourcePolicyHourlyCycle: - description: | - Time window specified for hourly operations. - id: '#ResourcePolicyHourlyCycle' - properties: - hoursInCycle: - description: Allows to define schedule that runs every nth hour. 
- format: int32 - type: integer - startTime: - description: | - 'Time within the window to start the operations. It must - be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT.' - type: string - type: object - ResourcePolicySnapshotSchedulePolicy: - description: | - A snapshot schedule policy specifies when and how frequently - snapshots are to be created for the target disk. Also specifies how many - and how long these scheduled snapshots should be retained. - id: '#ResourcePolicySnapshotSchedulePolicy' - properties: - retentionPolicy: - $ref: '#/schemas/ResourcePolicySnapshotSchedulePolicyRetentionPolicy' - description: | - Retention policy applied to snapshots created by this resource - policy. - schedule: - $ref: '#/schemas/ResourcePolicySnapshotSchedulePolicySchedule' - description: | - A Vm Maintenance Policy specifies what kind of infrastructure - maintenance we are allowed to perform on this VM and when. Schedule - that is applied to disks covered by this policy. - snapshotProperties: - $ref: '#/schemas/ResourcePolicySnapshotSchedulePolicySnapshotProperties' - description: | - Properties with which snapshots are created such as labels, - encryption keys. - type: object - ResourcePolicySnapshotSchedulePolicyRetentionPolicy: - description: | - Policy for retention of scheduled snapshots. - id: '#ResourcePolicySnapshotSchedulePolicyRetentionPolicy' - properties: - maxRetentionDays: - description: | - Maximum age of the snapshot that is allowed to be kept. - format: int32 - type: integer - onSourceDiskDelete: - description: | - Specifies the behavior to apply to scheduled snapshots - when the source disk is deleted. - enum: - - APPLY_RETENTION_POLICY - - KEEP_AUTO_SNAPSHOTS - - UNSPECIFIED_ON_SOURCE_DISK_DELETE - enumDescriptions: - - '' - - '' - - '' - type: string - type: object - ResourcePolicySnapshotSchedulePolicySchedule: - description: | - A schedule for disks where the schedueled operations are performed. 
- id: '#ResourcePolicySnapshotSchedulePolicySchedule' - properties: - dailySchedule: - $ref: '#/schemas/ResourcePolicyDailyCycle' - hourlySchedule: - $ref: '#/schemas/ResourcePolicyHourlyCycle' - weeklySchedule: - $ref: '#/schemas/ResourcePolicyWeeklyCycle' - type: object - ResourcePolicySnapshotSchedulePolicySnapshotProperties: - description: | - Specified snapshot properties for scheduled snapshots created - by this policy. - id: '#ResourcePolicySnapshotSchedulePolicySnapshotProperties' - properties: - guestFlush: - description: | - Indication to perform a ?guest aware? snapshot. - type: boolean - labels: - additionalProperties: - type: string - description: | - Labels to apply to scheduled snapshots. These can be later - modified by the setLabels method. Label values may be empty. - type: object - storageLocations: - description: | - Cloud Storage bucket storage location of the auto snapshot - (regional or multi-regional). - items: - type: string - type: array - type: object - ResourcePolicyWeeklyCycle: - description: | - Time window specified for weekly operations. - id: '#ResourcePolicyWeeklyCycle' - properties: - dayOfWeeks: - description: | - Up to 7 intervals/windows, one for each day of the week. - items: - $ref: '#/schemas/ResourcePolicyWeeklyCycleDayOfWeek' - type: array - type: object - ResourcePolicyWeeklyCycleDayOfWeek: - id: '#ResourcePolicyWeeklyCycleDayOfWeek' - properties: - day: - description: | - Allows to define schedule that runs specified day of the - week. - enum: - - MONDAY - - TUESDAY - - WEDNESDAY - - THURSDAY - - FRIDAY - - SATURDAY - - SUNDAY - type: string - startTime: - description: | - 'Time within the window to start the operations. It must - be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT.' - type: string - type: object - -outputs: - name: - type: string - description: The resource policy name. 
- -documentation: - - templates/resource_policy/README.md - -examples: - - templates/resource_policy/examples/resource_policy.yaml diff --git a/dm/templates/resource_policy/tests/integration/resource_policy.bats b/dm/templates/resource_policy/tests/integration/resource_policy.bats deleted file mode 100755 index 1c49c4bd0fa..00000000000 --- a/dm/templates/resource_policy/tests/integration/resource_policy.bats +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # test specific variables - export REGION="us-east1" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/resource_policy/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that test-res-policy-inst-${RAND} was created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute resource-policies list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --filter="region:( ${REGION} )" - [[ "$output" =~ "test-res-policy-inst-${RAND}" ]] -} - -@test "Verifying resource policy test-res-policy-inst-${RAND}" { - run gcloud compute resource-policies describe test-res-policy-inst-${RAND} \ - --region="${REGION}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "day: MONDAY" ]] - [[ "$output" =~ "startTime: 00:00" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - # Due to deployment does not delete resource policy it needs to be removed via CLI tool - gcloud compute resource-policies delete test-res-policy-inst-${RAND} \ - --region="${REGION}" - - run gcloud compute resource-policies list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --filter="region:( ${REGION} )" - [[ ! "$output" =~ "test-res-policy-inst-${RAND}" ]] -} diff --git a/dm/templates/resource_policy/tests/integration/resource_policy.yaml b/dm/templates/resource_policy/tests/integration/resource_policy.yaml deleted file mode 100644 index b5715e100b4..00000000000 --- a/dm/templates/resource_policy/tests/integration/resource_policy.yaml +++ /dev/null @@ -1,50 +0,0 @@ -# Test of the Resource Policy template. -# -# Variables: -# RAND: A random string used by the testing suite. -# REGION: A region to deploy instance with resource policy. 
- -imports: - - path: templates/resource_policy/resource_policy.py - name: resource_policy.py - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: test-resource-policy-${RAND} - type: resource_policy.py - properties: - region: ${REGION} - resource: $(ref.instance-res-pol-${RAND}.name) - snapshotSchedulePolicy: - schedule: - weeklySchedule: - dayOfWeeks: - - day: "MONDAY" - startTime: "00:00" - - -# Test prerequisites: - - name: instance-res-pol-${RAND} - type: instance.py - properties: - zone: ${REGION}-b - name: test-res-policy-inst-${RAND} - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - machineType: f1-micro - canIpForward: true - diskType: pd-ssd - diskSizeGb: 50 - networks: - - network: $(ref.test-network-0-${RAND}.selfLink) - subnetwork: $(ref.test-subnetwork-0-${RAND}.selfLink) - - name: test-network-0-${RAND} - type: compute.v1.network - properties: - autoCreateSubnetworks: false - - name: test-subnetwork-0-${RAND} - type: compute.v1.subnetwork - properties: - network: $(ref.test-network-0-${RAND}.selfLink) - ipCidrRange: 10.0.1.0/24 - region: ${REGION} diff --git a/dm/templates/route/README.md b/dm/templates/route/README.md deleted file mode 100644 index 20cef8a67e1..00000000000 --- a/dm/templates/route/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Route - -This template creates a route. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [network](../network/README.md) -- Optionally, create one or more of the following based on your traffic destination ([see examples/route.yaml](examples/route.yaml)): - - [VM](../vm/vm.md) - - [VPN Tunnel](../vpn/README.md) -- Grant the [compute.networkAdmin or compute.admin](https://cloud.google.com/compute/docs/access/iam) IAM role to the project service account - -## Deployment - -### Resources - -- [compute.v1.route](https://cloud.google.com/compute/docs/reference/rest/v1/routes) - - -### Properties - -See `properties` section in the schema file(s): - -- [Route](route.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/route.yaml](examples/route.yaml): - -```shell - cp templates/route/examples/route.yaml my_route.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_route.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_route.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` -## Examples - -- [Route](examples/route.yaml) diff --git a/dm/templates/route/examples/route.yaml b/dm/templates/route/examples/route.yaml deleted file mode 100644 index 2bbbd07f58b..00000000000 --- a/dm/templates/route/examples/route.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# Example of the route template usage. -# -# In this example, routes are created for 4 types of destination traffic -# handlers: -# 1- a static IP route (test-ip-route) -# 2- an instance route (test-instance-route) -# 3- a gateway route (test-gateway-route) -# 4- a vpn tunnel route (test-vpn-tunnel-route) -# -# The value of the 'routeType' property determines what destination property must -# be set: -# -If the routeType is 'ipaddress', the 'nextHopIp' property must be set. -# Replace the IP placeholder with your static IP address. -# -If the routeType is 'instance', the 'instanceName' property must be set. -# Replace the instance placeholder with your instance name. -# -If the routeType is 'gateway', the 'gatewayName' property must be set. -# This must be set to 'default-internet-gateway'. -# https://cloud.google.com/compute/docs/reference/rest/v1/routes -# -If the routeType is 'vpntunnel', the 'vpnTunnelName' property must be -# set. Replace the vpntunnel placeholder with your VPN -# tunnel name. -# -# Replace the `network` name placeholder with your actual VPC name. -# Replace the `nextHopIp` with a valid IP address for your route -# Replace the `vpnTunnelName` with the actual VPN name. 
- -imports: - - path: templates/route/route.py - name: route.py - -resources: - - name: test-routes - type: route.py - properties: - network: - routes: - - name: test-ip-route - nextHopIp: - priority: 20000 - destRange: 0.0.0.0/0 - tags: - - my-iproute-tag - - name: test-instance-route - routeType: instance - instanceName: test-instance-name - zone: us-east1-b - priority: 30000 - destRange: 0.0.0.0/0 - tags: - - my-instanceroute-tag - - name: test-gateway-route - routeType: gateway - gatewayName: default-internet-gateway - priority: 40000 - destRange: 0.0.0.0/0 - tags: - - my-gatewayroute-tag - - name: test-vpn-tunnel-route - routeType: vpntunnel - vpnTunnelName: - region: us-east1 - priority: 500 - destRange: 0.0.0.0/0 - tags: - - my-vpntunnelroute-tag diff --git a/dm/templates/route/route.py b/dm/templates/route/route.py deleted file mode 100644 index 838222181d6..00000000000 --- a/dm/templates/route/route.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""This template creates a custom route.""" - - -from hashlib import sha1 -import json - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - project_id = properties.get('project', context.env['project']) - - network_name = generate_network_url(properties) - - resources = [] - out = {} - for i, route in enumerate(properties['routes'], 1000): - name = route.get('name') - if not name: - name = '{}-{}'.format(context.env['name'], sha1(json.dumps(route).encode('utf-8')).hexdigest()[:10]) - - route_properties = { - 'name': name, - 'network': network_name, - 'project': project_id, - 'priority': route.get('priority', i), - } - for specified_properties in route: - route_properties[specified_properties] = route[specified_properties] - - resources.append( - { - 'name': name, - 'type': 'single_route.py', - 'properties': route_properties - } - ) - - out[name] = { - 'selfLink': '$(ref.' + name + '.selfLink)', - 'nextHopNetwork': '$(ref.' + name + '.nextHopNetwork)', - } - - outputs = [{'name': 'routes', 'value': out}] - - return {'resources': resources, 'outputs': outputs} - - -def generate_network_url(properties): - """ Gets the network name. """ - - network_name = properties.get('network') - is_self_link = '/' in network_name or '.' in network_name - - if is_self_link: - network_url = network_name - else: - network_url = 'global/networks/{}'.format(network_name) - - return network_url diff --git a/dm/templates/route/route.py.schema b/dm/templates/route/route.py.schema deleted file mode 100644 index 9ef724f192d..00000000000 --- a/dm/templates/route/route.py.schema +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Route - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates a custom route. - - For more information on this resource: - https://cloud.google.com/vpc/docs/routes - - APIs endpoints used by this template: - - gcp-types/compute-v1:instanceTemplates => - https://cloud.google.com/compute/docs/reference/rest/v1/routes - -imports: - - path: ../route/single_route.py - name: single_route.py - -additionalProperties: false - -required: - - network - - routes - -properties: - network: - type: string - description: | - Name of the network the route applies to. - project: - type: string - description: | - The project ID of the project containing the Route. - routes: - type: array - uniqueItems: True - minItems: 1 - description: A list of routes. - items: - type: object - description: | - Please check the properties in single_route.py.schema for details. - required: - - tags - - destRange - -outputs: - routes: - type: array - description: Array of route information. - items: - description: | - The name of the firewall rule resource. For example, the output can - be referenced as: $(ref..routes..selfLink) - patternProperties: - ".*": - type: object - description: | - Details for a route resource. Please check the outputs in - single_route.py.schema for details. 
- -documentation: - - templates/route/README.md - -examples: - - templates/route/examples/route.yaml diff --git a/dm/templates/route/single_route.py b/dm/templates/route/single_route.py deleted file mode 100644 index 9cb537714d1..00000000000 --- a/dm/templates/route/single_route.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2019 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""This template creates a custom route.""" - - -from hashlib import sha1 -import json - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - project_id = properties.get('project', context.env['project']) - name = context.env['name'] - - # Set the common route properties. 
- res_properties = { - 'name': properties.get('name', name), - 'network': properties['network'], - 'project': project_id, - 'tags': properties['tags'], - 'priority': properties['priority'], - 'destRange': properties['destRange'] - } - - # Check the route type and fill out the following fields: - if properties.get('routeType') == 'instance': - instance_name = properties.get('instanceName') - zone = properties.get('zone', '') - res_properties['nextHopInstance'] = generate_instance_url( - project_id, - zone, - instance_name - ) - elif properties.get('routeType') == 'gateway': - gateway_name = properties.get('gatewayName') - res_properties['nextHopGateway'] = generate_gateway_url( - project_id, - gateway_name - ) - elif properties.get('routeType') == 'vpntunnel': - vpn_tunnel_name = properties.get('vpnTunnelName') - region = properties.get('region', '') - res_properties['nextHopVpnTunnel'] = generate_vpn_tunnel_url( - project_id, - region, - vpn_tunnel_name - ) - - optional_properties = [ - 'nextHopIp', - 'nextHopInstance', - 'nextHopNetwork', - 'nextHopGateway', - 'nextHopVpnTunnel', - ] - - for prop in optional_properties: - if prop in properties: - res_properties[prop] = properties[prop] - - resources = [ - { - 'name': name, - # https://cloud.google.com/compute/docs/reference/rest/v1/routes - 'type': 'gcp-types/compute-v1:routes', - 'properties': res_properties - } - ] - - outputs = [ - {'name': 'selfLink', 'value': '$(ref.' + name + '.selfLink)'}, - {'name': 'nextHopNetwork', 'value': properties['network']}, - ] - - return {'resources': resources, 'outputs': outputs} - - -def generate_instance_url(project, zone, instance): - """ Format the resource name as a resource URI. """ - - is_self_link = '/' in instance or '.' 
in instance - - if is_self_link: - instance_url = instance - else: - instance_url = 'projects/{}/zones/{}/instances/{}' - instance_url = instance_url.format(project, zone, instance) - - return instance_url - - -def generate_gateway_url(project, gateway): - """ Format the resource name as a resource URI. """ - return 'projects/{}/global/gateways/{}'.format(project, gateway) - - -def generate_vpn_tunnel_url(project, region, vpn_tunnel): - """ Format the resource name as a resource URI. """ - is_self_link = '/' in vpn_tunnel or '.' in vpn_tunnel - - if is_self_link: - tunnel_url = vpn_tunnel - else: - tunnel_url = 'projects/{}/regions/{}/vpnTunnels/{}' - tunnel_url = tunnel_url.format(project, region, vpn_tunnel) - return tunnel_url diff --git a/dm/templates/route/single_route.py.schema b/dm/templates/route/single_route.py.schema deleted file mode 100644 index 4f29a1bbc0d..00000000000 --- a/dm/templates/route/single_route.py.schema +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright 2019 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Route - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates a custom route. 
- - For more information on this resource: - https://cloud.google.com/vpc/docs/routes - - APIs endpoints used by this template: - - gcp-types/compute-v1:instanceTemplates => - https://cloud.google.com/compute/docs/reference/rest/v1/routes - -additionalProperties: false - -required: -- name -- network -- tags -- destRange - -allOf: - - oneOf: - - required: - - nextHopInstance - - required: - - nextHopNetwork - - required: - - nextHopGateway - - required: - - nextHopVpnTunnel - - anyOf: - - required: - - nextHopIp - - required: - - routeType - - oneOf: - - allOf: - - not: - required: - - routeType - - not: - required: - - nextHopIp - - required: - - nextHopIp - - required: - - instanceName - - required: - - gatewayName - - required: - - vpnTunnelName - - oneOf: - - not: - required: - - gatewayName - - allOf: - - required: - - gatewayName - - routeType - - properties: - routeType: - enum: ["gateway"] - - oneOf: - - not: - required: - - vpnTunnelName - - allOf: - - required: - - vpnTunnelName - - routeType - - region - - properties: - routeType: - enum: ["vpntunnel"] - - oneOf: - - not: - required: - - instanceName - - allOf: - - required: - - instanceName - - routeType - - zone - - properties: - routeType: - enum: ["instance"] - - oneOf: - - not: - required: - - nextHopIp - - allOf: - - required: - - nextHopIp - - routeType - - properties: - routeType: - enum: ["ipaddress"] - - allOf: - - required: - - nextHopIp - - not: - required: - - routeType - -properties: - name: - type: string - description: | - Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, - and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression - [a-z]([-a-z0-9]*[a-z0-9])?. The first character must be a lowercase letter, and all following characters - (except for the last character) must be a dash, lowercase letter, or digit. - The last character must be a lowercase letter or digit. 
- Resource name would be used if omitted. - description: - type: string - description: | - An optional description of this resource. Provide this property when you create the resource. - network: - type: string - description: | - Name of the network the route applies to. - project: - type: string - description: | - The project ID of the project containing the Route. - routeType: - type: string - description: | - The resource type that will handle the matching packets. - Optionally you can use nextHop* attributes without specifying this field - enum: - - ipaddress - - instance - - gateway - - vpntunnel - tags: - type: array - uniqueItems: True - minItems: 1 - description: | - A list of instance tags to which the route applies. - items: - type: string - description: An instance tag for the route. - priority: - type: number - description: | - The priority of this route. Priority is used to break ties in cases where there is more than one - matching route of equal prefix length. In cases where multiple routes have equal prefix length, the one - with the lowest-numbered priority value wins. The default value is 1000. - The priority value must be from 0 to 65535, inclusive. - default: 1000 - minimum: 0 - maximum: 65535 - destRange: - type: string - description: | - The destination range of outgoing packets the route applies - to. Example: 192.168.0.1/10. Only IPv4 is supported. - pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]{1,2}$ - nextHopInstance: - type: string - description: | - The URL to an instance that should handle matching packets. You can specify this as a full or partial URL. - For example: - https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/ - nextHopIp: - type: string - description: | - Used when routeType is 'ipaddress'. - The network IP address of the instance that should handle the matching - packets. Example: 192.168.0.1. Only IPv4 is supported. 
- pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}$ - nextHopNetwork: - type: string - description: | - The URL of the local network if it should handle matching packets. - nextHopGateway: - type: string - description: | - The URL to a gateway that should handle matching packets. You can only specify the internet gateway using - a full or partial valid URL: - projects/project/global/gateways/default-internet-gateway - nextHopVpnTunnel: - type: string - description: | - The URL to a VpnTunnel that should handle matching packets. - instanceName: - type: string - description: | - Used when routeType is 'instance'. - The name of the instance that should handle the matching packets. - zone: - type: string - description: | - Used when routeType is 'instance'. - The zone where the instance resides. - gatewayName: - type: string - description: | - Used when routeType is 'gateway'. - The name of the gateway that will handle the matching packets. Only the - 'default-internet-gateway' value is supported. - vpnTunnelName: - type: string - description: | - Used when routeType is 'vpntunnel'. - The name of the VPN tunnel that should handle the matching packets. - region: - type: string - description: | - Used when routeType is 'vpntunnel'. - The region where the VPN tunnel resides. - -outputs: - selfLink: - type: string - description: The URI (SelfLink) of the firewall rule resource. - nextHopNetwork: - type: string - description: URL to a Network that should handle matching packets. diff --git a/dm/templates/route/tests/integration/route.bats b/dm/templates/route/tests/integration/route.bats deleted file mode 100644 index 732b036cabc..00000000000 --- a/dm/templates/route/tests/integration/route.bats +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! 
-e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/route/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - gcloud compute networks create network-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --description "integration test ${RAND}" \ - --subnet-mode custom - gcloud compute networks subnets create subnet-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network network-${RAND} \ - --range 10.118.8.0/22 \ - --region us-east1 - gcloud compute routers create router-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network network-${RAND} \ - --asn 65001 \ - --region us-east1 - gcloud compute target-vpn-gateways create gateway-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network network-${RAND} \ - --region us-east1 - gcloud compute addresses create staticip-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 - gcloud compute forwarding-rules create esprule-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --target-vpn-gateway gateway-${RAND} \ - --region us-east1 \ - --ip-protocol "ESP" \ - --address staticip-${RAND} - gcloud compute forwarding-rules create 
udp4500rule-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --target-vpn-gateway gateway-${RAND} \ - --region us-east1 \ - --ip-protocol "UDP" \ - --address staticip-${RAND} \ - --ports 4500 - gcloud compute forwarding-rules create udp500rule-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --target-vpn-gateway gateway-${RAND} \ - --region us-east1 \ - --ip-protocol "UDP" \ - --address staticip-${RAND} \ - --ports 500 - gcloud compute vpn-tunnels create vpntunnel-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --peer-address 1.2.3.4 \ - --shared-secret 'superSecretPassw0rd' \ - --target-vpn-gateway gateway-${RAND} \ - --router router-${RAND} \ - --region us-east1 - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud compute vpn-tunnels delete vpntunnel-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute forwarding-rules delete udp500rule-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute forwarding-rules delete udp4500rule-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute forwarding-rules delete esprule-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute addresses delete staticip-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute target-vpn-gateways delete gateway-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute routers delete router-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute networks subnets delete subnet-${RAND} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute networks delete network-${RAND} \ - --project 
"${CLOUD_FOUNDATION_PROJECT_ID}" -q - delete_config - rm -f ${RANDOM_FILE} - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --config "${CONFIG}" - - echo "status = ${status}" - echo "output = ${output}" - - [ "$status" -eq 0 ] -} - -@test "Verifying that resources were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute routes list --filter="name:gateway-route-${RAND} AND priority:1002" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "status = ${status}" - echo "output = ${output}" - echo "lines1 = ${lines[1]}" - - [ "$status" -eq 0 ] - [[ "${lines[1]}" =~ "gateway-route-${RAND}" ]] - - run gcloud compute routes list --filter="name:instance-route-${RAND} AND priority:1001" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "status = ${status}" - echo "output = ${output}" - echo "lines1 = ${lines[1]}" - - [ "$status" -eq 0 ] - [[ "${lines[1]}" =~ "instance-route-${RAND}" ]] - - run gcloud compute routes list --filter="(name:ip-route-${RAND} AND priority:20000)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "status = ${status}" - echo "output = ${output}" - echo "lines1 = ${lines[1]}" - - [ "$status" -eq 0 ] - [[ "${lines[1]}" =~ "ip-route-${RAND}" ]] - - run gcloud compute routes list --filter="(name:vpn-tunnel-route-${RAND} AND priority:500)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "status = ${status}" - echo "output = ${output}" - echo "lines1 = ${lines[1]}" - - [ "$status" -eq 0 ] - [[ "${lines[1]}" =~ "vpn-tunnel-route-${RAND}" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - run gcloud compute routes list --filter="name:gateway-route-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! 
"$output" =~ "gateway-route-${RAND}" ]] - - run gcloud compute routes list --filter="name:instance-route-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "instance-route-${RAND}" ]] - - run gcloud compute routes list --filter="name:ip-route-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "ip-route-${RAND}" ]] - - run gcloud compute routes list --filter="name:vpn-runnel-route-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "vpn-tunnel-route-${RAND}" ]] -} diff --git a/dm/templates/route/tests/integration/route.yaml b/dm/templates/route/tests/integration/route.yaml deleted file mode 100644 index 196110c873d..00000000000 --- a/dm/templates/route/tests/integration/route.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Test of the route template. -# -# Variables: -# RAND: A random string used by the testing suite. -# - -imports: - - path: templates/route/route.py - name: route.py - -resources: - - name: test-ip-route-${RAND} - type: route.py - properties: - network: network-${RAND} - routes: - - name: ip-route-${RAND} - nextHopIp: 10.118.8.12 - priority: 20000 - destRange: 0.0.0.0/0 - tags: - - my-iproute-tag - - name: instance-route-${RAND} - routeType: instance - instanceName: my-instance-name - zone: us-east1-b - destRange: 0.0.0.0/0 - tags: - - my-instanceroute-tag - - name: gateway-route-${RAND} - routeType: gateway - gatewayName: default-internet-gateway - destRange: 0.0.0.0/0 - tags: - - my-gatewayroute-tag - - name: vpn-tunnel-route-${RAND} - routeType: vpntunnel - vpnTunnelName: vpntunnel-${RAND} - region: us-east1 - priority: 500 - destRange: 0.0.0.0/0 - tags: - - my-vpntunnelroute-tag - - nextHopIp: 10.118.8.13 - destRange: 0.0.0.0/0 - tags: - - my-iproute-tag diff --git a/dm/templates/runtime_config/README.md b/dm/templates/runtime_config/README.md deleted file mode 100644 index 54bc38b84ac..00000000000 --- a/dm/templates/runtime_config/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# Google 
Cloud Runtime Configurator - -This template creates a Runtime Configurator with the associated resources. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Install gcloud **beta** components: - - ```shell - gcloud components update - gcloud components install beta - ``` - -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Enable the [Cloud Runtime Configurator API](https://console.developers.google.com/apis/api/runtimeconfig.googleapis.com) -- Grant the [Cloud RuntimeConfig Admin](https://cloud.google.com/deployment-manager/runtime-configurator/access-control) - IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [v1beta1.projects.configs](https://cloud.google.com/deployment-manager/runtime-configurator/create-and-delete-runtimeconfig-resources) - -### Properties - -See the `properties` section in the schema file(s): - -- [Runtime Config Schema](runtime_config.py.schema) -- [Variable Schema](variable.py.schema) -- [Waiter Schema](waiter.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - - ```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - ``` - -2. Go to the [dm](../../) directory - - ```shell - cd dm - ``` - -3. Copy the example DM config to be used as a model for the deployment, - in this case [examples/runtime\_config.yaml](examples/runtime_config.yaml) - - ```shell - cp templates/runtime_config/examples/runtime_config.yaml my_runtime_config.yaml - ``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - - ```shell - vim my_runtime_config.yaml # <== change values to match your GCP setup - ``` - -5. 
Create your deployment as described below, replacing `` - with your with your own deployment name - - ```shell - gcloud deployment-manager deployments create \ - --config my_runtime_config.yaml - ``` - -6. In case you need to delete your deployment: - - ```shell - gcloud deployment-manager deployments delete - ``` - -## Examples - -- [Cloud Runtime Configurator with Variables and Waiters](examples/runtime_config.yaml) diff --git a/dm/templates/runtime_config/examples/runtime_config.yaml b/dm/templates/runtime_config/examples/runtime_config.yaml deleted file mode 100644 index 557087de7d8..00000000000 --- a/dm/templates/runtime_config/examples/runtime_config.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Example of the runtime config template usage. - -imports: - - path: templates/runtime_config/runtime_config.py - name: runtime_config.py - -resources: - - name: my-test-config - type: runtime_config.py - properties: - name: my-test-config - description: my config description - variables: - - name: myapp/dev/sql/connection_string - text: super text value - - name: myapp/dev/web/wildcardcert - value: c3VwZXJhd2Vzb21ldGV4dAo= - waiters: - - name: my-test-waiter - timeout: 3.5s - success: - cardinality: - path: myapp/dev - number: 2 diff --git a/dm/templates/runtime_config/runtime_config.py b/dm/templates/runtime_config/runtime_config.py deleted file mode 100644 index 61e36a63db3..00000000000 --- a/dm/templates/runtime_config/runtime_config.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -""" -This template creates a Runtime Configurator with the associated resources. -""" - - -from hashlib import sha1 - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - resources = [] - properties = context.properties - project_id = properties.get('projectId', context.env['project']) - name = properties.get('name', properties.get('config', context.env['name'])) - parent = 'projects/{}/configs/{}'.format(project_id, name) - - # The runtimeconfig resource. - runtime_config = { - 'name': name, - # https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs - 'type': 'gcp-types/runtimeconfig-v1beta1:projects.configs', - 'properties': { - 'config': name, - # TODO: uncomment after gcp type is fixed - # 'project': project_id, - 'description': properties['description'] - } - } - - resources.append(runtime_config) - - # The runtimeconfig variable resources. - for variable in properties.get('variables', []): - suffix = sha1('{}-{}'.format(context.env['name'], variable.get('name', variable.get('variable'))).encode('utf-8')).hexdigest()[:10] - variable['project'] = project_id - variable['parent'] = parent - variable['config'] = name - variable_res = { - 'name': '{}-{}'.format(context.env['name'], suffix), - 'type': 'variable.py', - 'properties': variable - } - resources.append(variable_res) - - # The runtimeconfig waiter resources. 
- for waiter in properties.get('waiters', []): - suffix = sha1('{}-{}'.format(context.env['name'], waiter.get('name', waiter.get('waiter'))).encode('utf-8')).hexdigest()[:10] - waiter['project'] = project_id - waiter['parent'] = parent - waiter['config'] = name - waiter_res = { - 'name': '{}-{}'.format(context.env['name'], suffix), - 'type': 'waiter.py', - 'properties': waiter - } - resources.append(waiter_res) - - outputs = [{'name': 'configName', 'value': '$(ref.{}.name)'.format(name)}] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/runtime_config/runtime_config.py.schema b/dm/templates/runtime_config/runtime_config.py.schema deleted file mode 100644 index 8553c88dc08..00000000000 --- a/dm/templates/runtime_config/runtime_config.py.schema +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Runtime Configurator - author: Sourced Group Inc. - version: 1.0.0 - description: | - Supports creation of a Runtime Configurator. 
- - For more information on this resource, see - https://cloud.google.com/deployment-manager/runtime-configurator/ - - APIs endpoints used by this template: - - gcp-types/runtimeconfig-v1beta1:projects.configs => - https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs - -imports: - - path: variable.py - - path: waiter.py - -additionalProperties: false - -oneOf: - - required: - - config - - required: - - name - -properties: - config: - type: string - description: | - The config resource name. DEPRECATED, use "name" - Resource name would be used if omitted. - name: - type: string - description: | - The config resource name. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing resources. The - Google apps domain is prefixed if applicable. - projectId: - type: string - description: ProjectID of the project to create the config in. - description: - type: string - description: The config resource description. - variables: - type: array - uniqItems: true - description: | - The list of variables as defined in the variable.py template. - Example: - - variableName: myappvariable - variableTextValue: "my variable value" - waiters: - type: array - uniqItems: true - description: | - The list of waiters as defined in the waiter.py template. 
- Example: - - waiter: mywaiter - timeout: 3.5s - success: - cardinality: - path: myapp/dev - number: 2 - - -documentation: - - templates/runtime_config/README.md - -examples: - - templates/runtime_config/examples/runtime_config.yaml diff --git a/dm/templates/runtime_config/tests/integration/runtime_config.bats b/dm/templates/runtime_config/tests/integration/runtime_config.bats deleted file mode 100644 index 9523d89d50e..00000000000 --- a/dm/templates/runtime_config/tests/integration/runtime_config.bats +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables. 
- export CONFIG_NAME="test-config-${RAND}" - export VARIABLE_1="test/dev/db/connection_string" - export VARIABLE_1_VALUE="Server=sqlsrv;Database=mydb;Uid=uname;Pwd=pwd;" - export VARIABLE_2="test/dev/web/appvalue" - # 'my test text value' in base64 - export VARIABLE_2_VALUE="bXkgdGVzdCB0ZXh0IHZhbHVl" - export WAITER_NAME="test-waiter-${RAND}" - export WAITER_TIMEOUT="2.500s" - export WAITER_PATH="test/dev" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. 
-} - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verify if CONFIG ${CONFIG_NAME} is created " { - run gcloud beta runtime-config configs list --format="value(name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${CONFIG_NAME}" ]] -} - -@test "Verify if VARIABLE ${VARIABLE_1} is created " { - run gcloud beta runtime-config configs variables list \ - --config-name ${CONFIG_NAME} \ - --format="value(name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${VARIABLE_1}" ]] -} - -@test "Verify if VARIABLE ${VARIABLE_1} has value ${VARIABLE_1_VALUE} " { - run gcloud beta runtime-config configs variables get-value ${VARIABLE_1} \ - --config-name ${CONFIG_NAME} - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${VARIABLE_1_VALUE}" ]] -} - -@test "Verify if VARIABLE ${VARIABLE_2} is created " { - run gcloud beta runtime-config configs variables list \ - --config-name ${CONFIG_NAME} \ - --format="value(name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${VARIABLE_2}" ]] -} - -@test "Verify if VARIABLE ${VARIABLE_2} has value ${VARIABLE_2_VALUE} " { - run gcloud beta runtime-config configs variables get-value ${VARIABLE_2} \ - --config-name ${CONFIG_NAME} - [[ "$status" -eq 0 ]] - [[ "$output" =~ "my test text value" ]] -} - -@test "Verify if WAITER ${WAITER_NAME} is created " { - run gcloud beta runtime-config configs waiters list \ - --config-name ${CONFIG_NAME} \ - --format="value(name)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${WAITER_NAME}" ]] -} - -@test "Verify if WAITER ${WAITER_NAME} has timeout ${WAITER_TIMEOUT} " { - run gcloud beta runtime-config configs waiters describe ${WAITER_NAME} \ - --config-name ${CONFIG_NAME} \ - --format="value(timeout)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${WAITER_TIMEOUT}" ]] -} - -@test "Verify if WAITER 
${WAITER_NAME} success path is ${WAITER_PATH} " { - run gcloud beta runtime-config configs waiters describe ${WAITER_NAME} \ - --config-name ${CONFIG_NAME} \ - --format="value(success.cardinality.path)" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${WAITER_PATH}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/runtime_config/tests/integration/runtime_config.yaml b/dm/templates/runtime_config/tests/integration/runtime_config.yaml deleted file mode 100644 index 996720c33b1..00000000000 --- a/dm/templates/runtime_config/tests/integration/runtime_config.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Test of the Runtime Configurator template. - -imports: - - path: templates/runtime_config/runtime_config.py - name: runtime_config.py - -resources: - - name: ${CONFIG_NAME} - type: runtime_config.py - properties: - name: ${CONFIG_NAME} - description: my config description - variables: - - name: ${VARIABLE_1} - text: ${VARIABLE_1_VALUE} - - name: ${VARIABLE_2} - value: ${VARIABLE_2_VALUE} - waiters: - - name: ${WAITER_NAME} - timeout: ${WAITER_TIMEOUT} - success: - cardinality: - path: ${WAITER_PATH} - number: 2 diff --git a/dm/templates/runtime_config/variable.py b/dm/templates/runtime_config/variable.py deleted file mode 100644 index 21d0c207ecb..00000000000 --- a/dm/templates/runtime_config/variable.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Creates a runtimeConfig variable resource. """ - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - project_id = properties.get('project', context.env['project']) - config_name = context.properties.get('config') - - props = { - 'variable': properties.get('name', properties.get('variable')), - 'parent': properties['parent'], - # TODO: uncomment after gcp type is fixed - # 'project': project_id, - } - - optional_properties = ['text', 'value'] - props.update({ - p: properties[p] - for p in optional_properties if p in properties - }) - - resources = [{ - 'name': context.env['name'], - # https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.variables - 'type': 'gcp-types/runtimeconfig-v1beta1:projects.configs.variables', - 'properties': props, - 'metadata': { - 'dependsOn': [config_name] - } - }] - - outputs = [{ - 'name': 'updateTime', - 'value': '$(ref.{}.updateTime)'.format(context.env['name']) - }] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/runtime_config/variable.py.schema b/dm/templates/runtime_config/variable.py.schema deleted file mode 100644 index 8cbcf916ea1..00000000000 --- a/dm/templates/runtime_config/variable.py.schema +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Variable - author: Sourced Group Inc. - version: 1.0.0 - description: | - Creates a RuntimeConfig variable resource. - - For more information on this resource, see - https://cloud.google.com/deployment-manager/runtime-configurator/ - - APIs endpoints used by this template: - - gcp-types/runtimeconfig-v1beta1:projects.configs.variables => - https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.variables - -additionalProperties: false - -required: - - parent - -allOf: - - oneOf: - - required: - - text - - required: - - value - - oneOf: - - required: - - variable - - required: - - name - -properties: - project: - type: string - description: | - The project ID of the project containing resources. The - Google apps domain is prefixed if applicable. - parent: - type: string - description: | - The path to the configuration that will own the waiter. - The configuration must exist beforehand; the path must be in the - projects/[PROJECT_ID]/configs/[CONFIG_NAME] format. - name: - type: string - description: | - The key (name) of the variable. For example, status and - users/jane-smith/favorite_color are valid keys. Can contain digits, - letters, dashes, and slashes. The max length is 256 characters. - variable: - type: string - description: | - DEPRECATED, please use "name" - config: - type: string - description: | - Config resource name (for dependency) - text: - type: string - description: | - The string value of the variable. The length of the value must be less - than 4096 bytes. 
Empty values are also accepted. - For example, text: "my text value". The string must be valid UTF-8. - value: - type: string - description: | - The binary value of the variable. The length of the value must be less - than 4096 bytes. Empty values are also accepted. The value must be base64 - encoded. Only one of value or text can be set. - A base64-encoded string. - -outputs: - updateTime: - type: string - description: The time when the variable was last updated. - -documentation: - - templates/runtime_config/README.md - -examples: - - templates/runtime_config/examples/runtime_config.yaml diff --git a/dm/templates/runtime_config/waiter.py b/dm/templates/runtime_config/waiter.py deleted file mode 100644 index 192327d776a..00000000000 --- a/dm/templates/runtime_config/waiter.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Creates a runtimeConfig waiter resource. """ - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - project_id = properties.get('project', context.env['project']) - config_name = properties.get('config') - - props = { - 'waiter': properties.get('name', properties.get('waiter')), - 'parent': properties['parent'], - 'timeout': properties['timeout'], - 'success': properties['success'], - # TODO: uncomment after gcp type is fixed - # 'project': project_id, - } - - optional_properties = ['failure'] - props.update({ - p: properties[p] - for p in optional_properties if p in properties - }) - - resources = [{ - 'name': context.env['name'], - # https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.waiters - 'type': 'gcp-types/runtimeconfig-v1beta1:projects.configs.waiters', - 'properties': props, - 'metadata': { - 'dependsOn': [config_name] - } - }] - - outputs = [{ - 'name': 'createTime', - 'value': '$(ref.{}.createTime)'.format(context.env['name']) - }] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/runtime_config/waiter.py.schema b/dm/templates/runtime_config/waiter.py.schema deleted file mode 100644 index f1ec167398a..00000000000 --- a/dm/templates/runtime_config/waiter.py.schema +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Waiter - author: Sourced Group Inc. 
- version: 1.0.0 - description: | - Supports creation of a RuntimeConfig Waiter resource. - - For more information on this resource, see - https://cloud.google.com/deployment-manager/runtime-configurator/creating-a-waiter - - APIs endpoints used by this template: - - gcp-types/runtimeconfig-v1beta1:projects.configs.waiters => - https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.waiters - -additionalProperties: false - -required: - - parent - - timeout - - success - -oneOf: - - required: - - waiter - - required: - - name - -properties: - project: - type: string - description: | - The project ID of the project containing resources. The - Google apps domain is prefixed if applicable. - parent: - type: string - description: | - The path to the configuration that will own the waiter. - The configuration must exist beforehand; the path must be in the - projects/[PROJECT_ID]/configs/[CONFIG_NAME] format. - name: - type: string - description: | - The name of the waiter resource, in the - projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME] format. - Must match the RFC 1035 segment specification. Maximum length is 64 - bytes. - waiter: - type: string - description: | - DEPRECATED, please use "name" - config: - type: string - description: | - Config resource name (for dependency) - timeout: - type: string - description: | - The timeout of the waiter in seconds, beginning from the - instant when the waiters().create method is called; e.g., "3.5s". - If this time elapses before the success or failure conditions are met, - the waiter fails and sets the error code to DEADLINE_EXCEEDED. - failure: - type: object - additionalProperties: false - description: | - The failure condition for the waiter. If this condition is met, done is - set to True, and the error code is set to ABORTED. The failure - condition takes precedence over the success condition. If both conditions - are met, the failure is indicated. 
This value is optional; if no - failure condition is set, the only failure scenario is the timeout. - properties: - cardinality: - type: object - additionalProperties: false - description: The cardinality of the EndCondition. - properties: - path: - type: string - description: | - The root of the variable subtree to monitor. E.g., /foo. - number: - type: number - default: 1 - description: | - The number of variables under the path that must exist to meet - this condition. If not specified, defaults to 1. - success: - type: object - additionalProperties: false - description: | - The success condition. If this condition is met, done is set to True, - and the error value remains unset. The failure condition takes - precedence over the success condition. If both conditions are met, the - failure is indicated. - properties: - cardinality: - type: object - additionalProperties: false - description: The cardinality of the EndCondition. - properties: - path: - type: string - description: | - The root of the variable subtree to monitor. E.g. /foo. - number: - type: number - default: 1 - description: | - The number of variables under the path that must exist to meet - this condition. If not specified, defaults to 1. - -outputs: - createTime: - type: string - description: | - The instant at which the waiter resource was created. - A timestamp in the RFC3339 UTC "Zulu" format, accurate to nanoseconds. - Example: "2014-10-02T15:01:23.045123456Z". - -documentation: - - templates/runtime_config/README.md - -examples: - - templates/runtime_config/examples/runtime_config.yaml diff --git a/dm/templates/shared_vpc_subnet_iam/README.md b/dm/templates/shared_vpc_subnet_iam/README.md deleted file mode 100644 index 45e08730525..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# Shared VPC Subnet IAM - -This template grants IAM roles to a user on a shared VPC subnetwork. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [network and subnetworks](../network/README.md) -- Grant the [compute.networkAdmin or compute.admin](https://cloud.google.com/compute/docs/access/iam) IAM role to the project service account - -## Deployment - -### Resources - -- [gcp-types/compute-beta:compute.subnetworks.setIamPolicy](https://cloud.google.com/compute/docs/reference/rest/beta/subnetworks/setIamPolicy) -- [gcp-types/compute-beta:compute.subnetworks.getIamPolicy](https://cloud.google.com/compute/docs/reference/rest/beta/subnetworks/getIamPolicy) - -### Properties - -See `properties` section in the schema file(s): - -- [Shared VPC Subnet IAM](shared_vpc_subnet_iam.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/shared\_vpc\_subnet_iam.yaml](examples/shared_vpc_subnet_iam.yaml): - -```shell - cp templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam.yaml my_shared_vpc_subnet-iam.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_shared_vpc_subnet-iam.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_shared_vpc_subnet-iam.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples -- [Shared VPC Subnet IAM Bindings syntax](examples/shared_vpc_subnet_iam_bindings.yaml) -- [Shared VPC Subnet IAM Policy syntax](examples/shared_vpc_subnet_iam_policy.yaml) -- [Shared VPC Subnet IAM Legacy](examples/shared_vpc_subnet_iam_legacy.yaml) - -## Tests Cases -- [Shared VPC Subnet IAM Bindings syntax](tests/integration/bindings.bats) -- [Shared VPC Subnet IAM Policy syntax](tests/integration/policy.bats) -- [Shared VPC Subnet IAM Legacy syntax](tests/integration/legacy.bats) diff --git a/dm/templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_bindings.yaml b/dm/templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_bindings.yaml deleted file mode 100644 index 19bb2263f75..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_bindings.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Example usage of the shared VPC subnet IAM template -# -# The `members` property is a list of members to -# grant IAM roles on a shared VPC subnetwork. -# A member can be a user, service account, group, or domain. -# -# Replace `resourceId` with a valid subnet ID. 
- -imports: - - path: templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py - name: shared_vpc_subnet_iam.py - -resources: - - name: test-shared-vpc-subnet-iam-policy - type: shared_vpc_subnet_iam.py - properties: - bindings: - - resourceId: test-subnet-1 - region: us-east1 - role: roles/compute.networkUser - members: - - user:name@example.com - - serviceAccount:example@myprojectname.gserviceaccount.com - - resourceId: - region: us-east1 - role: roles/compute.networkUser - members: - - group:admins@example.com - - domain:example.com diff --git a/dm/templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_legacy.yaml b/dm/templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_legacy.yaml deleted file mode 100644 index 3281c458197..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_legacy.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Example usage of the shared VPC subnet IAM template -# -# The `members` property is a list of members to -# grant IAM roles on a shared VPC subnetwork. -# A member can be a user, service account, group, or domain. -# -# Replace `subnetId` with a valid subnet ID. 
- -imports: - - path: templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py - name: shared_vpc_subnet_iam.py - -resources: - - name: test-shared-vpc-subnet-iam - type: shared_vpc_subnet_iam.py - properties: - subnets: - - subnetId: test-subnet-1 - region: us-east1 - role: roles/compute.networkUser - members: - - user:name@example.com - - serviceAccount:example@myprojectname.gserviceaccount.com - - subnetId: - region: us-east1 - role: roles/compute.networkUser - members: - - group:admins@example.com - - domain:example.com diff --git a/dm/templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_policy.yaml b/dm/templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_policy.yaml deleted file mode 100644 index ba71849d298..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam_policy.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Example usage of the shared VPC subnet IAM template -# -# The `members` property is a list of members to -# grant IAM roles on a shared VPC subnetwork. -# A member can be a user, service account, group, or domain. -# -# Replace `resourceId` with a valid subnet ID. 
- -imports: - - path: templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py - name: shared_vpc_subnet_iam.py - -resources: - - name: test-shared-vpc-subnet-iam-policy - type: shared_vpc_subnet_iam.py - properties: - policy: - bindings: - - resourceId: test-subnet-1 - region: us-east1 - role: roles/compute.networkUser - members: - - user:name@example.com - - serviceAccount:example@myprojectname.gserviceaccount.com - - resourceId: - region: us-east1 - role: roles/compute.networkUser - members: - - group:admins@example.com - - domain:example.com diff --git a/dm/templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py b/dm/templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py deleted file mode 100644 index 38119687b95..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""This template grants IAM roles to a user on a shared VPC subnetwork.""" - - -def _append_resource(subnets, project, name_id): - """Append subnets to resources.""" - resources = [] - out = {} - for subnet in subnets: - policy_name = 'iam-subnet-policy-{}'.format(subnet[name_id]) - resources.append({ - 'name': policy_name, - # https://cloud.google.com/compute/docs/reference/rest/beta/subnetworks/setIamPolicy - 'type': 'gcp-types/compute-beta:compute.subnetworks.setIamPolicy', - 'properties': { - 'name': subnet[name_id], - 'project': project, - 'region': subnet['region'], - 'bindings': [{ - 'role': subnet['role'], - 'members': subnet['members'] - }] - } - }) - - out[policy_name] = { - 'etag': '$(ref.' + policy_name + '.etag)' - } - return resources, out - - -def generate_config(context): - """Entry point for the deployment resources.""" - try: - resources, out = _append_resource( - context.properties['subnets'], # Legacy syntax - context.env['project'], - 'subnetId' - ) - except KeyError: - try: - resources, out = _append_resource( - context.properties['policy']['bindings'], # Policy syntax - context.env['project'], - 'resourceId' - ) - except KeyError: - resources, out = _append_resource( - context.properties['bindings'], # Bindings syntax - context.env['project'], - 'resourceId' - ) - outputs = [{'name': 'policies', 'value': out}] - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py.schema b/dm/templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py.schema deleted file mode 100644 index 97734fb03be..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py.schema +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Shared VPC Subnet IAM - author: Sourced Group Inc. - version: 1.0.0 - description: | - Grants IAM roles to a user on a shared VPC subnetwork - - For more information on this resource: - https://cloud.google.com/compute/docs/reference/rest/beta/subnetworks - - APIs endpoints used by this template: - - gcp-types/compute-beta:compute.subnetworks.setIamPolicy => - https://cloud.google.com/compute/docs/reference/rest/beta/subnetworks/setIamPolicy - -oneOf: -- required: - - bindings -- required: - - policy -- required: - - subnets # Legacy - -additionalProperties: false - -project: - type: string - description: The Project ID. - -definitions: - policy: - type: object - description: | - REQUIRED: The complete policy to be applied to the 'resource'. - The size of the policy is limited to a few 10s of KB. An empty policy is - in general a valid policy but certain services (like Projects) might - reject them. - additionalProperties: false - properties: - version: - type: integer - description: Deprecated - bindings: - type: array - description: | - Associates a list of members to a role. bindings with no members will - result in an error. - uniqItems: true - items: - subnetId: # legacy - type: string - description: The subnet ID. - requestId: - type: string - description: Name or id of the resource for this request. - role: - type: string - description: | - Role that is assigned to members. For example, - roles/viewer, roles/editor, or roles/owner. - members: - type: array - description: A list of member identities. 
- uniqItems: true - items: - type: string - description: | - Specifies the identities requesting access for a Cloud Platform - resource. `members` can have the following values: - - allUsers: A special identifier that represents anyone who - is on the internet; with or without a Google account. - - allAuthenticatedUsers: A special identifier that represents - anyone who is authenticated with a Google account or a - service account. - - user:{emailid} - An email address that represents a - specific Google account. For example, user:name@example.com - - serviceAccount:{emailid} - An email address that represents - a service account. For example, - serviceAccount:my-other-app@appspot.gserviceaccount.com - - group:{emailid} - An email address that represents a Google - group. For example, group:admins@example.com - - domain:{domain} - A Google Apps domain name that represents - all the users of that domain. For example, - google.com or example.com. - condition: - type: object - description: | - The condition that is associated with this binding. NOTE: An - unsatisfied condition will not allow user access via current - binding. Different bindings, including their conditions, are - examined independently. - additionalProperties: false - properties: - expression: - type: string - description: | - Textual representation of an expression in Common Expression - Language syntax. The application context of the containing - message determines which well-known feature set of CEL is - supported. - title: - type: string - description: | - An optional title for the expression, i.e. a short string - describing its purpose. This can be used e.g. in UIs which - allow to enter the expression. - description: - type: string - description: | - An optional description of the expression. This is a longer - text which describes the expression, e.g. when hovered over - it in a UI. 
- location: - type: string - description: | - An optional string indicating the location of the expression - for error reporting, e.g. a file name and a position in the - file. - auditConfigs: - type: object - description: | - Specifies cloud audit logging configuration for this policy. - additionalProperties: false - properties: - service: - type: string - description: | - Specifies a service that will be enabled for audit logging. - For example, storage.googleapis.com, cloudsql.googleapis.com. - allServices is a special value that covers all services. - auditLogConfigs: - type: object - description: | - The configuration for logging of each type of permission. - additionalProperties: false - properties: - logType: - type: string - description: The log type that this config enables. - exemptedMembers: - type: array - description: | - Specifies the identities that do not cause logging for this - type of permission. Follows the same format of - Binding.members. - uniqItems: true - items: - type: string - etag: - type: string - description: | - etag is used for optimistic concurrency control as a way to help - prevent simultaneous updates of a policy from overwriting each other. - It is strongly suggested that systems make use of the etag in the - read-modify-write cycle to perform policy updates in order to avoid - race conditions: An etag is returned in the response to getIamPolicy, - and systems are expected to put that etag in the request to - setIamPolicy to ensure that their change will be applied to the same - version of the policy. If no etag is provided in the call to - setIamPolicy, then the existing policy is overwritten blindly. - A base64-encoded string. 
- -properties: - policy: - $ref: '#/definitions/policy' - bindings: - $ref: '#/definitions/policy/properties/bindings' - subnets: # legacy - $ref: '#/definitions/policy/properties/bindings' - etag: - $ref: '#/definitions/policy/properties/etag' - -outputs: - policies: - type: array - description: Array of IAM policy resource information. - items: - description: | - IAM policy resource name. Will be in the format - 'iam-subnet-policy-'. For example, the output - can be referenced as: - $(ref..policies.iam-subnet-policy-.selfLink) - patternProperties: - ".*": - type: object - description: Details for a subnetwork IAM policy. - properties: - etag: - type: string - description: The etag of the subnetwork's IAM policy. - -documentation: - - templates/shared_vpc_subnet_iam/README.md - -examples: - - templates/shared_vpc_subnet_iam/examples/shared_vpc_subnet_iam.yaml diff --git a/dm/templates/shared_vpc_subnet_iam/tests/integration/bindings.bats b/dm/templates/shared_vpc_subnet_iam/tests/integration/bindings.bats deleted file mode 120000 index 785c07d8fb8..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/tests/integration/bindings.bats +++ /dev/null @@ -1 +0,0 @@ -shared_vpc_subnet_iam.bats \ No newline at end of file diff --git a/dm/templates/shared_vpc_subnet_iam/tests/integration/bindings.yaml b/dm/templates/shared_vpc_subnet_iam/tests/integration/bindings.yaml deleted file mode 100644 index 5ca33a8bc4a..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/tests/integration/bindings.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Test of the shared VPC subnet IAM template. -# -# Variables: -# RAND: A random string used by the testing suite. 
-# - -imports: - - path: templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py - name: shared_vpc_subnet_iam.py - -resources: - - name: test-shared-vpc-subnet-iam-${RAND} - type: shared_vpc_subnet_iam.py - properties: - bindings: - - resourceId: subnet-${RAND}-1 - region: us-east1 - role: roles/compute.networkUser - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - resourceId: subnet-${RAND}-2 - region: us-east1 - role: roles/compute.networkUser - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com diff --git a/dm/templates/shared_vpc_subnet_iam/tests/integration/legacy.bats b/dm/templates/shared_vpc_subnet_iam/tests/integration/legacy.bats deleted file mode 120000 index 785c07d8fb8..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/tests/integration/legacy.bats +++ /dev/null @@ -1 +0,0 @@ -shared_vpc_subnet_iam.bats \ No newline at end of file diff --git a/dm/templates/shared_vpc_subnet_iam/tests/integration/legacy.yaml b/dm/templates/shared_vpc_subnet_iam/tests/integration/legacy.yaml deleted file mode 100644 index da29a9bc14a..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/tests/integration/legacy.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Test of the shared VPC subnet IAM template. -# -# Variables: -# RAND: A random string used by the testing suite. 
-# - -imports: - - path: templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py - name: shared_vpc_subnet_iam.py - -resources: - - name: test-shared-vpc-subnet-iam-${RAND} - type: shared_vpc_subnet_iam.py - properties: - subnets: - - subnetId: subnet-${RAND}-1 - region: us-east1 - role: roles/compute.networkUser - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - subnetId: subnet-${RAND}-2 - region: us-east1 - role: roles/compute.networkUser - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com diff --git a/dm/templates/shared_vpc_subnet_iam/tests/integration/policy.bats b/dm/templates/shared_vpc_subnet_iam/tests/integration/policy.bats deleted file mode 120000 index 785c07d8fb8..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/tests/integration/policy.bats +++ /dev/null @@ -1 +0,0 @@ -shared_vpc_subnet_iam.bats \ No newline at end of file diff --git a/dm/templates/shared_vpc_subnet_iam/tests/integration/policy.yaml b/dm/templates/shared_vpc_subnet_iam/tests/integration/policy.yaml deleted file mode 100644 index b38fc74b15e..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/tests/integration/policy.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Test of the shared VPC subnet IAM template. -# -# Variables: -# RAND: A random string used by the testing suite. 
-# - -imports: - - path: templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py - name: shared_vpc_subnet_iam.py - -resources: - - name: test-shared-vpc-subnet-iam-${RAND} - type: shared_vpc_subnet_iam.py - properties: - policy: - bindings: - - resourceId: subnet-${RAND}-1 - region: us-east1 - role: roles/compute.networkUser - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - resourceId: subnet-${RAND}-2 - region: us-east1 - role: roles/compute.networkUser - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com diff --git a/dm/templates/shared_vpc_subnet_iam/tests/integration/shared_vpc_subnet_iam.bats b/dm/templates/shared_vpc_subnet_iam/tests/integration/shared_vpc_subnet_iam.bats deleted file mode 100755 index c9ea6e29dc4..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/tests/integration/shared_vpc_subnet_iam.bats +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -export TEST_SERVICE_ACCOUNT="test-sa-${RAND}" - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. 
- DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/shared_vpc_subnet_iam/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - gcloud compute networks create "network-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --description "Integration test ${RAND}" \ - --subnet-mode custom - gcloud compute networks subnets create "subnet-${RAND}-1" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network "network-${RAND}" \ - --range 10.118.8.0/22 \ - --region us-east1 - gcloud compute networks subnets create "subnet-${RAND}-2" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network "network-${RAND}" \ - --range 192.168.0.0/16 \ - --region us-east1 - gcloud iam service-accounts create "${TEST_SERVICE_ACCOUNT}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud compute networks subnets delete "subnet-${RAND}-2" --region us-east1 \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - gcloud compute networks subnets delete "subnet-${RAND}-1" --region us-east1 \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - gcloud compute networks delete network-${RAND} --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - gcloud iam service-accounts delete "${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} - -@test "Verifying that roles were granted in deployment ${DEPLOYMENT_NAME}" { - run gcloud beta compute networks subnets get-iam-policy "subnet-${RAND}-1" --region us-east1 \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="bindings.members:${TEST_SERVICE_ACCOUNT}" - [[ "$output" =~ "serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" ]] - [[ "$output" =~ "roles/compute.networkUser" ]] - - run gcloud beta compute networks subnets get-iam-policy "subnet-${RAND}-2" --region us-east1 \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --filter="bindings.members:${TEST_SERVICE_ACCOUNT}" - [[ "$output" =~ "serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com" ]] - [[ "$output" =~ "roles/compute.networkUser" ]] -} - -@test "Deleting deployment" { - gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q --project "${CLOUD_FOUNDATION_PROJECT_ID}" -} diff --git a/dm/templates/shared_vpc_subnet_iam/tests/integration/shared_vpc_subnet_iam.yaml b/dm/templates/shared_vpc_subnet_iam/tests/integration/shared_vpc_subnet_iam.yaml deleted file mode 100644 index da29a9bc14a..00000000000 --- a/dm/templates/shared_vpc_subnet_iam/tests/integration/shared_vpc_subnet_iam.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Test of the shared VPC subnet IAM template. -# -# Variables: -# RAND: A random string used by the testing suite. 
-# - -imports: - - path: templates/shared_vpc_subnet_iam/shared_vpc_subnet_iam.py - name: shared_vpc_subnet_iam.py - -resources: - - name: test-shared-vpc-subnet-iam-${RAND} - type: shared_vpc_subnet_iam.py - properties: - subnets: - - subnetId: subnet-${RAND}-1 - region: us-east1 - role: roles/compute.networkUser - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com - - subnetId: subnet-${RAND}-2 - region: us-east1 - role: roles/compute.networkUser - members: - - serviceAccount:${TEST_SERVICE_ACCOUNT}@${CLOUD_FOUNDATION_PROJECT_ID}.iam.gserviceaccount.com diff --git a/dm/templates/ssl_certificate/README.md b/dm/templates/ssl_certificate/README.md deleted file mode 100644 index 977d6f84b40..00000000000 --- a/dm/templates/ssl_certificate/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# SSL Certificate - -This template creates an SSL certificate. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [roles/compute.securityAdmin](https://cloud.google.com/compute/docs/access/iam), - or [compute.loadBalancerAdmin](https://cloud.google.com/compute/docs/access/iam) - IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [gcp-types/compute-v1:sslCertificates](https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates) -- [gcp-types/compute-beta:sslCertificates](https://cloud.google.com/compute/docs/reference/rest/beta/sslCertificates) -(If `betaFeaturesEnabled` flag turned on.) - -### Properties - -See the `properties` section in the schema file(s): - -- [SSL Certificate](ssl_certificate.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. 
Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this - case, [examples/ssl\_certificate.yaml](examples/ssl_certificate.yaml): - -```shell - cp templates/ssl_certificate/examples/ssl_certificate.yaml \ - my_ssl_certificate.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for - properties, refer to the schema files listed above): - -```shell - vim my_ssl_certificate.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace \ with the relevant - deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_ssl_certificate.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [SSL Certificate](examples/ssl_certificate.yaml) diff --git a/dm/templates/ssl_certificate/examples/ssl_certificate.yaml b/dm/templates/ssl_certificate/examples/ssl_certificate.yaml deleted file mode 100644 index 88ef96018c3..00000000000 --- a/dm/templates/ssl_certificate/examples/ssl_certificate.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Example of the SSL certificate template usage. -# -# Replace the following placeholders with valid values: -# : contents of the certificate file -# : contents of the private key file - -imports: - - path: templates/ssl_certificate/ssl_certificate.py - name: ssl_certificate.py - -resources: - - name: example-certificate - type: ssl_certificate.py - properties: - certificate: - privateKey: diff --git a/dm/templates/ssl_certificate/examples/ssl_certificate_managed.yaml b/dm/templates/ssl_certificate/examples/ssl_certificate_managed.yaml deleted file mode 100644 index 7033fd5bddf..00000000000 --- a/dm/templates/ssl_certificate/examples/ssl_certificate_managed.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Example of the SSL certificate template usage. 
-# -# Replace the following placeholders with valid values: -# : DNS name for the managed certificate. (ex: test.cft.com) - -imports: - - path: templates/ssl_certificate/ssl_certificate.py - name: ssl_certificate.py - -resources: - - name: example-certificate - type: ssl_certificate.py - properties: - betaFeaturesEnabled: True - type: MANAGED - managed: - domains: - - diff --git a/dm/templates/ssl_certificate/ssl_certificate.py b/dm/templates/ssl_certificate/ssl_certificate.py deleted file mode 100644 index caa7f7e49c4..00000000000 --- a/dm/templates/ssl_certificate/ssl_certificate.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates an SSL certificate. """ - - -def set_optional_property(destination, source, prop_name): - """ Copies the property value if present. """ - - if prop_name in source: - destination[prop_name] = source[prop_name] - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - ssl_props = { - 'name': name, - 'project': project_id, - } - - if properties.get('betaFeaturesEnabled', False): - gcptype = 'gcp-types/compute-beta:sslCertificates' - else: - gcptype = 'gcp-types/compute-v1:sslCertificates' - - resource = { - 'name': context.env['name'], - # https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates - 'type': gcptype, - 'properties': ssl_props, - } - - for prop in [ - 'privateKey', - 'certificate', - 'description', - 'managed', - 'selfManaged', - 'type']: - set_optional_property(ssl_props, properties, prop) - - return { - 'resources': [resource], - 'outputs': - [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - } - ] - } diff --git a/dm/templates/ssl_certificate/ssl_certificate.py.schema b/dm/templates/ssl_certificate/ssl_certificate.py.schema deleted file mode 100644 index 47eba8d3762..00000000000 --- a/dm/templates/ssl_certificate/ssl_certificate.py.schema +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: SSL Certificate - author: Sourced Group Inc. - version: 1.1.0 - description: | - Supports creation of the SSL certificate resource. 
- - For more information on this resource: - https://cloud.google.com/load-balancing/docs/ssl-certificates - - APIs endpoints used by this template: - - gcp-types/compute-v1:sslCertificates => - https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates - -additionalProperties: false - -oneOf: - - required: - - privateKey - - certificate - - required: - - managed - - required: - - selfManaged - -dependencies: - managed: - required: - - betaFeaturesEnabled - - type - properties: - type: - enum: - - MANAGED - selfManaged: - required: - - betaFeaturesEnabled - - type - properties: - type: - enum: - - SELF_MANAGED - -properties: - name: - type: string - description: | - Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, - and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression - [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following - characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the SSL instance. The - Google apps domain is prefixed if applicable. - description: - type: string - description: | - An optional description of this resource. Provide this property when you create the resource. - privateKey: - type: string - description: | - The write-only private key in the PEM format. - certificate: - type: string - description: | - The local certificate file. The certificate must be in the PEM format. - The certificate chain must be no greater than 5 certs long. The chain - must include at least one intermediate cert. - managed: - type: object - description: | - Configuration and status of a managed SSL certificate. 
- properties: - domains: - type: array - uniqItems: true - items: - type: string - description: | - The domains for which a managed SSL certificate will be generated. Currently only single-domain certs are supported. - selfManaged: - type: object - description: | - Configuration and status of a self-managed SSL certificate. - required: - - privateKey - - certificate - properties: - certificate: - type: string - description: | - A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert. - privateKey: - type: string - description: | - A write-only private key in PEM format. Only insert requests will include this field. - type: - type: string - enum: - - SELF_MANAGED - - MANAGED - description: | - (Optional) Specifies the type of SSL certificate, either "SELF_MANAGED" or "MANAGED". If not specified, the certificate is self-managed and the fields certificate and privateKey are used. - betaFeaturesEnabled: - type: boolean - default: false - description: | - If this flag is enabled, beta properties can be used and the beta type will be used. - -outputs: - name: - type: string - description: The resource name. - selfLink: - type: string - description: The URI (SelfLink) of the SSL certificate resource. - -documentation: - - templates/ssl_certificate/README.md - -examples: - - templates/ssl_certificate/examples/ssl_certificate.yaml - diff --git a/dm/templates/ssl_certificate/tests/integration/ssl_certificate.bats b/dm/templates/ssl_certificate/tests/integration/ssl_certificate.bats deleted file mode 100755 index f6642eb2b94..00000000000 --- a/dm/templates/ssl_certificate/tests/integration/ssl_certificate.bats +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. 
-RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables. - export CERT_NAME="test-certificate-${RAND}" - export CERT_DESCRIPTION="test certificate description" - export CERT_EXTRACT="RXhhbXBsZSBPcmcuMRQwEgYDVQQDDAtleGFtcGxlLmNvbTAeFw0xODEwMTEyMDEy" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] -} - -@test "Verifying certificate" { - run gcloud compute ssl-certificates describe "${CERT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] - [[ "$output" =~ "description: ${CERT_DESCRIPTION}" ]] - [[ "$output" =~ "${CERT_EXTRACT}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - - echo "Status: $status" - echo "Output: $output" - - [[ "$status" -eq 0 ]] -} - diff --git a/dm/templates/ssl_certificate/tests/integration/ssl_certificate.yaml b/dm/templates/ssl_certificate/tests/integration/ssl_certificate.yaml deleted file mode 100644 index 7ed8141030b..00000000000 --- a/dm/templates/ssl_certificate/tests/integration/ssl_certificate.yaml +++ /dev/null @@ -1,127 +0,0 @@ -# Test of the SSL certificate template. 
- -imports: - - path: templates/ssl_certificate/ssl_certificate.py - name: ssl_certificate.py - -resources: - - name: test-cert - type: ssl_certificate.py - properties: - name: ${CERT_NAME} - description: ${CERT_DESCRIPTION} - certificate: | - -----BEGIN CERTIFICATE----- - MIIDODCCAiACCQCqBGuEeBXJTjANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQGEwJD - QTEQMA4GA1UECAwHT250YXJpbzEQMA4GA1UEBwwHVG9yb250bzEVMBMGA1UECgwM - RXhhbXBsZSBPcmcuMRQwEgYDVQQDDAtleGFtcGxlLmNvbTAeFw0xODEwMTEyMDEy - MjVaFw0xOTEwMTEyMDEyMjVaMF4xCzAJBgNVBAYTAkNBMRAwDgYDVQQIDAdPbnRh - cmlvMRAwDgYDVQQHDAdUb3JvbnRvMRUwEwYDVQQKDAxFeGFtcGxlIE9yZy4xFDAS - BgNVBAMMC2V4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC - AQEA5yLVBWSqKRWSJrGh2nbW0j+Soy+uvDKCjSZtXIVIcWvSP+WXd6RE2zlsNee4 - UA90Jjhx6Si2MxraCNblm4MJSQg46irBn4Vmex/2iY2vD1MbMEusTNIfpDh3A27H - qdBe+WSO0uHXfmgC68M8mgTMcMLVoUsGgv8B710qBN1D9EKPZdEhYBAEhC1RFj+8 - o3krWa983QFVND4PeFJSZe0LxAg44/bHVFNKK5Ub+TPa9P3t86SpoNj9/M7DPSkh - AtqmHL+90G0gvkfvtaoNRHHdqk2X+Uz3cUXWgl8xE+X5wcWu7r3cMxPaJvuLldJa - SIcc4YIlKOB0nFDKPK+iy7LqPwIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQAdf4dG - IkEmdNmTeGPVOUis+7ziWzRPxt8Vpmuq24z4H9mIkAPo/2aLpMKH7bloeYvz8blu - 5VQZx7StoE5Sde1ss/AoaL1dVJi/dgmeN2cHy7J6POu3e9n6yXGiIh0qHlFe83nJ - RVIqtN9QGGuabGt3WGbKElMKwrCl9NGhExi/LntPFllXfTLb2pVGXH47ZihynbUj - 4S21+KnQUPjhg6Na6hP3qLVqSYtWataJFpy6DOG1wgoAWjagNc3ltGdmv6O/ZkI3 - 3vymENyn8G7n+Z1knXUXxv4rJoeiYbZ7/2/bQ8BTc/RI5qnhzO8VYmyAZrrKGZnD - W2xuikK4nQHsideP - -----END CERTIFICATE----- - privateKey: | - -----BEGIN RSA PRIVATE KEY----- - MIIEowIBAAKCAQEA5yLVBWSqKRWSJrGh2nbW0j+Soy+uvDKCjSZtXIVIcWvSP+WX - d6RE2zlsNee4UA90Jjhx6Si2MxraCNblm4MJSQg46irBn4Vmex/2iY2vD1MbMEus - TNIfpDh3A27HqdBe+WSO0uHXfmgC68M8mgTMcMLVoUsGgv8B710qBN1D9EKPZdEh - YBAEhC1RFj+8o3krWa983QFVND4PeFJSZe0LxAg44/bHVFNKK5Ub+TPa9P3t86Sp - oNj9/M7DPSkhAtqmHL+90G0gvkfvtaoNRHHdqk2X+Uz3cUXWgl8xE+X5wcWu7r3c - MxPaJvuLldJaSIcc4YIlKOB0nFDKPK+iy7LqPwIDAQABAoIBACHHm25gWeYLOwLg - rxDokVjE5b9ntpfWofHTDeSZrg61fVLNUSexIEcRy1jNdshsmcMEONGkm4w4fmkQ - 
Txo9OlwsEXVXrliL/IA+GZ/czxrkQHL8fD/17Z3oiqw7wn5074xvP9heHUpiFRsz - u2WfEeng76vU9Syr7DJ5YSy40beew48gJsfclbbAWl1GQ4s1wluoMlutCWjRKSAD - Qg1pjJQuTHDpD+PNgHrx4Xbyjyo6tGqcdt9B0crhuxwTZXUogQsRKRuMHlxxBsbm - kINhSsNf8V8iRCBtZ4FPWcq+Rk/KntNzB9NZQFmrH5hS0oQmZjzNAyzCXIaYTji7 - Ju8XDaECgYEA9hUDBBwniphLZxvIC8GHVgAFx76Xw085bksVI0jNl2yG1HgNjCNA - W7DXJnyAtJZQjaItZfvB/tMm/ZAypf77tnru2n/uRvB4uG1Yh7RSy7rhLpibvTpU - e+DHm2c6kVW6Ng4q6rFxaunpjKEaeZO8pKowUu4YGU9YaSqvIGwoPFcCgYEA8HOc - 1J5Rop56BPvJgozqQRRQ3Q3AFfzlyYEniF35twIqnehemU3nJMdVp9jbZizOcrmu - ZBma5c5P5Bjgam3SQvswTUxmbIZ2VvvXOv5aPeldNdFHrADpVmOdKwcPxQ8qx+IT - GK3rrVRkH6+JByseHhxl3igIM7fAtbd27ENDkFkCgYEA9YmhqMgu7CtpkUg3IwPH - dhgvrE6QP2EdfN+OB9bszNqM7hOb8Oh7nwGkq9Iu2gHh/nCDu+6ocwtdLERlRRxX - LI0dJwffSQlIaz0vyLg0pPOjHEtJmlZJVhHDGVy3I6zWUHlyeRr0gClFz/wv3n97 - CxKFhTns8dQp80WT2FYTD6ECgYBU0KMYSIQJNZda3Km22BflPtJLNwdzehJf4qPc - MTHdQPFhY87Cir0mtv1ayF6TiuiDhUWjX3jI6N47Wh8Gy5goMkxWZ8WVMFTb19eS - opeYURGk4x5B6MxlwZt1yvbgDrqLaQ5NXUPNjwAGQTe3hJkKDABOvZYvD/j04DMd - oZhaeQKBgGGgnxTTUTEdqZ/AsVD0NmaqauTmyjsUpmAph9oazERM729n9igob85z - KXQmD9gmtTrCuv8LGyEPFsIhlBTOlLyzHpMhI2Hd23hzQp8v09ZdDpx8SqHv0THW - y8YMreKih6+reSfC+GuOzQoKi4vTKO7wwuXYysXkg3juupqZ7Kab - -----END RSA PRIVATE KEY----- - - name: test-cert1 - type: ssl_certificate.py - properties: - name: ${CERT_NAME}-1 - betaFeaturesEnabled: True - type: SELF_MANAGED - description: ${CERT_DESCRIPTION} - selfManaged: - certificate: | - -----BEGIN CERTIFICATE----- - MIIDODCCAiACCQCqBGuEeBXJTjANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQGEwJD - QTEQMA4GA1UECAwHT250YXJpbzEQMA4GA1UEBwwHVG9yb250bzEVMBMGA1UECgwM - RXhhbXBsZSBPcmcuMRQwEgYDVQQDDAtleGFtcGxlLmNvbTAeFw0xODEwMTEyMDEy - MjVaFw0xOTEwMTEyMDEyMjVaMF4xCzAJBgNVBAYTAkNBMRAwDgYDVQQIDAdPbnRh - cmlvMRAwDgYDVQQHDAdUb3JvbnRvMRUwEwYDVQQKDAxFeGFtcGxlIE9yZy4xFDAS - BgNVBAMMC2V4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC - AQEA5yLVBWSqKRWSJrGh2nbW0j+Soy+uvDKCjSZtXIVIcWvSP+WXd6RE2zlsNee4 - UA90Jjhx6Si2MxraCNblm4MJSQg46irBn4Vmex/2iY2vD1MbMEusTNIfpDh3A27H - 
qdBe+WSO0uHXfmgC68M8mgTMcMLVoUsGgv8B710qBN1D9EKPZdEhYBAEhC1RFj+8 - o3krWa983QFVND4PeFJSZe0LxAg44/bHVFNKK5Ub+TPa9P3t86SpoNj9/M7DPSkh - AtqmHL+90G0gvkfvtaoNRHHdqk2X+Uz3cUXWgl8xE+X5wcWu7r3cMxPaJvuLldJa - SIcc4YIlKOB0nFDKPK+iy7LqPwIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQAdf4dG - IkEmdNmTeGPVOUis+7ziWzRPxt8Vpmuq24z4H9mIkAPo/2aLpMKH7bloeYvz8blu - 5VQZx7StoE5Sde1ss/AoaL1dVJi/dgmeN2cHy7J6POu3e9n6yXGiIh0qHlFe83nJ - RVIqtN9QGGuabGt3WGbKElMKwrCl9NGhExi/LntPFllXfTLb2pVGXH47ZihynbUj - 4S21+KnQUPjhg6Na6hP3qLVqSYtWataJFpy6DOG1wgoAWjagNc3ltGdmv6O/ZkI3 - 3vymENyn8G7n+Z1knXUXxv4rJoeiYbZ7/2/bQ8BTc/RI5qnhzO8VYmyAZrrKGZnD - W2xuikK4nQHsideP - -----END CERTIFICATE----- - privateKey: | - -----BEGIN RSA PRIVATE KEY----- - MIIEowIBAAKCAQEA5yLVBWSqKRWSJrGh2nbW0j+Soy+uvDKCjSZtXIVIcWvSP+WX - d6RE2zlsNee4UA90Jjhx6Si2MxraCNblm4MJSQg46irBn4Vmex/2iY2vD1MbMEus - TNIfpDh3A27HqdBe+WSO0uHXfmgC68M8mgTMcMLVoUsGgv8B710qBN1D9EKPZdEh - YBAEhC1RFj+8o3krWa983QFVND4PeFJSZe0LxAg44/bHVFNKK5Ub+TPa9P3t86Sp - oNj9/M7DPSkhAtqmHL+90G0gvkfvtaoNRHHdqk2X+Uz3cUXWgl8xE+X5wcWu7r3c - MxPaJvuLldJaSIcc4YIlKOB0nFDKPK+iy7LqPwIDAQABAoIBACHHm25gWeYLOwLg - rxDokVjE5b9ntpfWofHTDeSZrg61fVLNUSexIEcRy1jNdshsmcMEONGkm4w4fmkQ - Txo9OlwsEXVXrliL/IA+GZ/czxrkQHL8fD/17Z3oiqw7wn5074xvP9heHUpiFRsz - u2WfEeng76vU9Syr7DJ5YSy40beew48gJsfclbbAWl1GQ4s1wluoMlutCWjRKSAD - Qg1pjJQuTHDpD+PNgHrx4Xbyjyo6tGqcdt9B0crhuxwTZXUogQsRKRuMHlxxBsbm - kINhSsNf8V8iRCBtZ4FPWcq+Rk/KntNzB9NZQFmrH5hS0oQmZjzNAyzCXIaYTji7 - Ju8XDaECgYEA9hUDBBwniphLZxvIC8GHVgAFx76Xw085bksVI0jNl2yG1HgNjCNA - W7DXJnyAtJZQjaItZfvB/tMm/ZAypf77tnru2n/uRvB4uG1Yh7RSy7rhLpibvTpU - e+DHm2c6kVW6Ng4q6rFxaunpjKEaeZO8pKowUu4YGU9YaSqvIGwoPFcCgYEA8HOc - 1J5Rop56BPvJgozqQRRQ3Q3AFfzlyYEniF35twIqnehemU3nJMdVp9jbZizOcrmu - ZBma5c5P5Bjgam3SQvswTUxmbIZ2VvvXOv5aPeldNdFHrADpVmOdKwcPxQ8qx+IT - GK3rrVRkH6+JByseHhxl3igIM7fAtbd27ENDkFkCgYEA9YmhqMgu7CtpkUg3IwPH - dhgvrE6QP2EdfN+OB9bszNqM7hOb8Oh7nwGkq9Iu2gHh/nCDu+6ocwtdLERlRRxX - LI0dJwffSQlIaz0vyLg0pPOjHEtJmlZJVhHDGVy3I6zWUHlyeRr0gClFz/wv3n97 - 
CxKFhTns8dQp80WT2FYTD6ECgYBU0KMYSIQJNZda3Km22BflPtJLNwdzehJf4qPc - MTHdQPFhY87Cir0mtv1ayF6TiuiDhUWjX3jI6N47Wh8Gy5goMkxWZ8WVMFTb19eS - opeYURGk4x5B6MxlwZt1yvbgDrqLaQ5NXUPNjwAGQTe3hJkKDABOvZYvD/j04DMd - oZhaeQKBgGGgnxTTUTEdqZ/AsVD0NmaqauTmyjsUpmAph9oazERM729n9igob85z - KXQmD9gmtTrCuv8LGyEPFsIhlBTOlLyzHpMhI2Hd23hzQp8v09ZdDpx8SqHv0THW - y8YMreKih6+reSfC+GuOzQoKi4vTKO7wwuXYysXkg3juupqZ7Kab - -----END RSA PRIVATE KEY----- - - name: test-cert2 - type: ssl_certificate.py - properties: - name: ${CERT_NAME}-2 - betaFeaturesEnabled: True - type: MANAGED - managed: - domains: - - test.cloud.cft.tips diff --git a/dm/templates/stackdriver_metric_descriptor/README.md b/dm/templates/stackdriver_metric_descriptor/README.md deleted file mode 100644 index 77e19343e9f..00000000000 --- a/dm/templates/stackdriver_metric_descriptor/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# Stackdriver Metric Descriptor - -This template creates a Stackdriver Metric Descriptor. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [Stackdriver Workspace](https://cloud.google.com/monitoring/workspaces/) -- Log in to the [Stackdriver Workspace](https://cloud.google.com/monitoring/workspaces/) - where the metric has to be deployed -- Grant the [monitoring.admin](https://cloud.google.com/monitoring/access-control) - IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [projects.metricDescriptors](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) -- [GCP metric list](https://cloud.google.com/monitoring/api/metrics_gcp) -- [AWS metric list](https://cloud.google.com/monitoring/api/metrics_aws) -- [Stackdriver Agent metric list](https://cloud.google.com/monitoring/api/metrics_agent) -- [External metric list](https://cloud.google.com/monitoring/api/metrics_other) - -### Properties - -See the `properties` section in the schema 
file(s): - -- [Stackdriver Metric Descriptor](stackdriver_metric_descriptor.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - - ```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - ``` - -2. Go to the [dm](../../) directory - - ```shell - cd dm - ``` - -3. Copy the example DM config to be used as a model for the deployment, - in this case [examples/stackdriver\_metric\_descriptor.yaml](examples/stackdriver_metric_descriptor.yaml) - - ```shell - cp templates/stackdriver_metric_descriptor/examples/stackdriver_metric_descriptor.yaml my_metric_descriptor.yaml - ``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - - ```shell - vim my_metric_descriptor.yaml # <== Replace placeholders if any - ``` - -5. Set the project context to use the Stackdriver Workspace project - - ```shell - gcloud config set project - ``` - -6. Create your deployment as described below, replacing `` - with your with your own deployment name - - ```shell - gcloud deployment-manager deployments create \ - --config my_metric_descriptor.yaml - ``` - -7. In case you need to delete your deployment - - ```shell - gcloud deployment-manager deployments delete - ``` - -## Examples - -- [Stackdriver Metric Descriptor](examples/stackdriver_metric_descriptor.yaml) diff --git a/dm/templates/stackdriver_metric_descriptor/examples/stackdriver_metric_descriptor.yaml b/dm/templates/stackdriver_metric_descriptor/examples/stackdriver_metric_descriptor.yaml deleted file mode 100644 index 6ca8810c39f..00000000000 --- a/dm/templates/stackdriver_metric_descriptor/examples/stackdriver_metric_descriptor.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Example of the Stackdriver Metric Descriptor template usage. 
-# -# NOTE: For supported metric types, kind, valueType, and unit, see -# https://cloud.google.com/monitoring/api/metrics. - -imports: - - path: templates/stackdriver_metric_descriptor/stackdriver_metric_descriptor.py - name: metric_descriptor.py - -resources: - - name: my-custom-metric - type: metric_descriptor.py - properties: - displayName: my-custom-metric - description: My custom metric descriptor - type: custom.googleapis.com/agent/log_entry_retry_count - metricKind: CUMULATIVE - valueType: INT64 - unit: "1" - launchStage: ALPHA - metadata: - samplePeriod: "10s" - ingestDelay: "1s" diff --git a/dm/templates/stackdriver_metric_descriptor/stackdriver_metric_descriptor.py b/dm/templates/stackdriver_metric_descriptor/stackdriver_metric_descriptor.py deleted file mode 100644 index ef7d22d1a0b..00000000000 --- a/dm/templates/stackdriver_metric_descriptor/stackdriver_metric_descriptor.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Stackdriver Metric Descriptor. """ - - -def generate_config(context): - """ Entry point for the deployment resources. 
""" - - resources = [] - outputs = [] - properties = context.properties - project_id = properties.get('project', context.env['project']) - metric_descriptor = { - 'name': context.env['name'], - # https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors - 'type': 'gcp-types/monitoring-v3:projects.metricDescriptors', - 'properties': { - 'name': properties.get('name', context.env['name']), - # 'project': project_id, - } - } - - required_properties = [ - 'type', - 'metricKind', - 'valueType', - 'unit' - ] - - for prop in required_properties: - if prop in properties: - metric_descriptor['properties'][prop] = properties[prop] - - # Optional properties: - optional_properties = ['displayName', 'labels', 'description', 'metadata', 'launchStage'] - - for prop in optional_properties: - if prop in properties: - metric_descriptor['properties'][prop] = properties[prop] - - resources.append(metric_descriptor) - - # Output variables: - output_props = [ - 'name', - 'type', - 'labels', - 'metricKind', - 'valueType', - 'unit', - 'description', - 'displayName', - 'metadata' - ] - - for outprop in output_props: - output = {} - if outprop in properties: - output['name'] = outprop - output['value'] = '$(ref.{}.{})'.format(context.env['name'], outprop) - outputs.append(output) - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/stackdriver_metric_descriptor/stackdriver_metric_descriptor.py.schema b/dm/templates/stackdriver_metric_descriptor/stackdriver_metric_descriptor.py.schema deleted file mode 100644 index 0c99e4b4ffe..00000000000 --- a/dm/templates/stackdriver_metric_descriptor/stackdriver_metric_descriptor.py.schema +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Stackdriver Metric Descriptor - author: Sourced Group Inc. - version: 1.0.0 - description: | - Supports creation of a Stackdriver Metric Descriptor. - - For more information on this resource, see - https://cloud.google.com/monitoring/ - - APIs endpoints used by this template: - - gcp-types/monitoring-v3:projects.metricDescriptors => - https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors - -additionalProperties: false - -allOf: - - required: - - type - - metricKind - - valueType - - oneOf: - - required: - - launchStage - - allOf: - - required: - - metadata - - properties: - metadata: - required: - - launchStage - - allOf: - - not: - required: - - launchStage - - oneOf: - - not: - required: - - metadata - - properties: - metadata: - not: - required: - - launchStage - - - oneOf: - - valueType: - enum: - - INT64 - - DOUBLE - - DISTRIBUTION - - not: - required: - - unit - -definitions: - launchStage: - type: string - description: | - The launch stage as defined by Google Cloud Platform Launch Stages: - http://cloud.google.com/terms/launch-stages. - enum: - - LAUNCH_STAGE_UNSPECIFIED - - EARLY_ACCESS - - ALPHA - - BETA - - GA - - DEPRECATED - -properties: - name: - type: string - description: | - The name of the Metric Descriptor. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing resources. The - Google apps domain is prefixed if applicable. - type: - type: string - description: | - The metric type, including the DNS name prefix. 
The type is not - URL-encoded. All user-defined metric types have the DNS name - custom.googleapis.com or external.googleapis.com. Metric types must - use a natural hierarchical grouping. - For example: - "custom.googleapis.com/invoice/paid/amount" - "external.googleapis.com/prometheus/up" - "appengine.googleapis.com/http/server/response_latencies" - For a list of metric types, see: - https://cloud.google.com/monitoring/api/metrics - labels: - type: array - description: | - The set of labels that can be used to describe a specific instance of - this metric type. For example, the - appengine.googleapis.com/http/server/response_latencies metric type has a - label for the HTTP response code, response_code. This enables you to look - at latencies for successful responses, or just for those responses that - had failed. - items: - type: object - description: The label description. - properties: - key: - type: string - description: The label key. - valueType: - type: string - description: The type of data that can be assigned to the label. - enum: - - STRING - - BOOL - - INT64 - description: - type: string - description: A human-readable description for the label. - metricKind: - type: string - description: | - Defines whether the metric records instantaneous values, changes to a - value, etc. Some combinations of metricKind and valueType might not be - supported. For detail, see: - https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind. - enum: - - METRIC_KIND_UNSPECIFIED - - GAUGE - - DELTA - - CUMULATIVE - valueType: - type: string - description: | - Defines whether the measurement is an integer, a floating-point number, - etc. Some combinations of metricKind and valueType might not be - supported. - enum: - - VALUE_TYPE_UNSPECIFIED - - BOOL - - INT64 - - DOUBLE - - STRING - - DISTRIBUTION - - MONEY - unit: - type: string - description: | - The unit in which the metric value is reported. 
Applicable if valueType - is INT64, DOUBLE, or DISTRIBUTION. - The supported units are a subset of The Unified Code for Units of - Measure standard. - For details, see: - http://unitsofmeasure.org/ucum.html - https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor - description: - type: string - description: A detailed description of the metric. - displayName: - type: string - description: | - The metric name that can be displayed in user interfaces. Use sentence - case without the ending period; for example, "Request count". - Optional but recommended to be set for any metrics associated with - user-visible concepts, such as Quota. - launchStage: - $ref: '#/definitions/launchStage' - metadata: - type: object - description: | - Additional annotations that can be used to guide the usage of a metric. - properties: - launchStage: - $ref: '#/definitions/launchStage' - samplePeriod: - type: string - description: | - The sampling period for the metric data points. For metrics that are - written periodically, consecutive data points are stored at this time - interval, excluding data loss due to errors. Metrics with a higher - granularity have a smaller sampling period. - See here for more information: - https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration. - ingestDelay: - type: string - description: | - The delay of data points caused by ingestion. Data points older than - this age are guaranteed to be ingested and available to be read, - excluding data loss due to errors. - See here for more information: - https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration. - -outputs: - name: - type: string - description: The name of the Metric Descriptor. - type: - type: string - description: The metric type as defined in the descriptor. - labels: - type: array - description: List of the defined labels. 
- items: - type: string - metricKind: - type: string - description: The metric kind as defined. - valueType: - type: string - description: The measurement type (int, string, etc.). - unit: - type: string - description: The unit in which the metric value is reported. - description: - type: string - description: The metric description. - displayName: - type: string - description: The display name of the metric. - metadata: - type: object - description: Metadata associated with the metric. - -documentation: - - templates/stackdriver_metric_descriptor/README.md - -examples: - - templates/stackdriver_metric_descriptor/examples/metric_descriptor.yaml diff --git a/dm/templates/stackdriver_metric_descriptor/tests/integration/stackdriver_metric_descriptor.bats b/dm/templates/stackdriver_metric_descriptor/tests/integration/stackdriver_metric_descriptor.bats deleted file mode 100644 index 6d8e43cfad7..00000000000 --- a/dm/templates/stackdriver_metric_descriptor/tests/integration/stackdriver_metric_descriptor.bats +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - # DM config file name must be 61 chars or less. - # Must be a match of regex '[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?' 
- DEPLOYMENT_NAME=`echo $DEPLOYMENT_NAME | cut -c 1-61` - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables. - export METRIC_NAME="test-metric-${RAND}" - export METRIC_TYPE="custom.googleapis.com/agent/log_entry_retry_count" - export METRIC_KIND="CUMULATIVE" - export VALUE_TYPE="INT64" - export UNIT="1" - export LAUNCH_STAGE="ALPHA" - export SAMPLE_PERIOD="10s" - export INGEST_DELAY="1s" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. -} - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] -} - -########## NOTE ########### -# -# From Google Cloud SDK version 221.0.0, beta 2018.07.16, the only way to -# list a metric descriptor is to make an API call to the -# project.metricDescriptors resource type. Hence, no test assertions were -# written. 
-# -# The following logging commands do not list custom metricDescriptors: -# `gcloud logging metrics list` -# `gcloud beta logging metrics list` -# -# -# References: -# https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors -# https://cloud.google.com/monitoring/custom-metrics/creating-metrics -# -########################### diff --git a/dm/templates/stackdriver_metric_descriptor/tests/integration/stackdriver_metric_descriptor.yaml b/dm/templates/stackdriver_metric_descriptor/tests/integration/stackdriver_metric_descriptor.yaml deleted file mode 100644 index 2eb6eaf325f..00000000000 --- a/dm/templates/stackdriver_metric_descriptor/tests/integration/stackdriver_metric_descriptor.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Test of the Stackdriver Metric Descriptor template. - -########## NOTE ########################### -# These tests need to be run in the Stackdriver workspace. -# -# To list the current context: -# gcloud config list -# -# To switch context: -# gcloud config set project -# -########################################### - -imports: - - path: templates/stackdriver_metric_descriptor/stackdriver_metric_descriptor.py - name: metric_descriptor.py - -resources: - - name: metric - type: metric_descriptor.py - properties: - name: ${METRIC_NAME} - displayName: my test custom metric - description: My test custom metric descriptor - type: ${METRIC_TYPE} - metricKind: ${METRIC_KIND} - valueType: ${VALUE_TYPE} - unit: "${UNIT}" - launchStage: ${LAUNCH_STAGE} - metadata: - samplePeriod: "${SAMPLE_PERIOD}" - ingestDelay: "${INGEST_DELAY}" diff --git a/dm/templates/stackdriver_notification_channels/README.md b/dm/templates/stackdriver_notification_channels/README.md deleted file mode 100644 index efa93215e53..00000000000 --- a/dm/templates/stackdriver_notification_channels/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# Stackdriver Notification Channels - -This template creates a Stackdriver Notification Channel. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Enable the [Stackdriver Monitoring AP](https://cloud.google.com/monitoring/api/ref_v3/rest/) -- Create a [Stackdriver Workspace](https://cloud.google.com/monitoring/workspaces/) -- Log in to the [Stackdriver Workspace](https://cloud.google.com/monitoring/workspaces/) - where the metric has to be deployed -- Grant the [monitoring.admin](https://cloud.google.com/monitoring/access-control) - IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [projects.notificationChannels](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannels) - -### Properties - -See the `properties` section in the schema file(s): - -- [Stackdriver Notification Channels](stackdriver_notification_channels.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - - ```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit - ``` - -2. Go to the [dm](../../) directory - - ```shell - cd dm - ``` - -3. Copy the example DM config to be used as a model for the deployment, - in this case [examples/stackdriver\_metric\_descriptor.yaml](examples/stackdriver_notification_channels.yaml) - - ```shell - cp templates/stackdriver_notification_channels/examples/stackdriver_notification_channels.yaml my_notification_channels.yaml - ``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. - - ```shell - vim my_notification_channels.yaml # <== Replace placeholders if any - ``` - -5. Set the project context to use the Stackdriver Workspace project - - ```shell - gcloud config set project - ``` - -6. 
Create your deployment as described below, replacing `` - with your with your own deployment name - - ```shell - gcloud deployment-manager deployments create \ - --config my_notification_channels.yaml - ``` - -7. In case you need to delete your deployment - - ```shell - gcloud deployment-manager deployments delete - ``` - -## Examples - -- [Stackdriver Notification Channels](examples/stackdriver_notification_channels.yaml) diff --git a/dm/templates/stackdriver_notification_channels/examples/stackdriver_notification_channels.yaml b/dm/templates/stackdriver_notification_channels/examples/stackdriver_notification_channels.yaml deleted file mode 100644 index f1bc04b7a92..00000000000 --- a/dm/templates/stackdriver_notification_channels/examples/stackdriver_notification_channels.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2019 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# Example of the Stackdriver Notification Channels template usage. 
- -imports: - - path: templates/stackdriver_notification_channels/stackdriver_notification_channels.py - name: stackdriver_notification_channels.py - -resources: - - name: my-notification-channels - type: stackdriver_notification_channels.py - properties: - notificationChannels: - - displayName: "name" - typeName: "slack" - labels: - channel_name: "#slack-channel" - auth_token: "token-1234567890" - policies: - - name: "1 - Availability - Cloud SQL Database - Memory usage (filtered) [MAX]" - documentationContent: "The janus rule ${condition.display_name} has generated this alert for the ${metric.display_name}." - conditions: - - displayName: "CloudSQL Memory" - filter: "metric.type=\"cloudsql.googleapis.com/database/memory/usage\" resource.type=\"cloudsql_database\" resource.label.database_id=\"sql_instance_id\"" - comparison: "COMPARISON_GT" - duration: "300s" - thresholdValue: 2750000000 - trigger: - count: 1 - aggregations: - - alignmentPeriod: "60s" - perSeriesAligner: "ALIGN_MAX" - crossSeriesReducer: "REDUCE_MEAN" - groupByFields: - - 'project' - diff --git a/dm/templates/stackdriver_notification_channels/stackdriver_notification_channels.py b/dm/templates/stackdriver_notification_channels/stackdriver_notification_channels.py deleted file mode 100644 index dc781d63ea0..00000000000 --- a/dm/templates/stackdriver_notification_channels/stackdriver_notification_channels.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2019 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a Stackdriver Metric Descriptor. """ - - -def get_condition_threshold(condition_properties): - condition_threshold = {} - properties = [ - 'filter', - 'comparison', - 'duration', - 'thresholdValue', - 'trigger', - 'aggregations' - ] - for prop in condition_properties: - if prop in properties: - condition_threshold[prop] = condition_properties[prop] - return condition_threshold - - -def get_policy_conditions(policy_conditions): - conditions = [] - for condition in policy_conditions: - conditions.append({ - 'displayName': condition['displayName'], - 'conditionThreshold': get_condition_threshold(condition) - }) - return conditions - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - resources = [] - outputs = [] - properties = context.properties - resource_name = context.env['name'] - project_id = properties.get('project', context.env['project']) - notification_channels = properties["notificationChannels"] - policies = properties["policies"] - - notification_channel_names = [] - - for index, channel in enumerate(notification_channels): - channel_type = channel['typeName'] - notification_channel_name = '{}-{}-{}'.format(resource_name, channel_type, index) - notification_channel_names.append('$(ref.{}.name)'.format(notification_channel_name)) - resources.append({ - 'name': notification_channel_name, - 'type': 'gcp-types/monitoring-v3:projects.notificationChannels', - 'properties': { - 'name': 'projects/{}'.format(project_id), - 'type': channel_type, - 'enabled': channel['channelEnabled'], - 'displayName': channel['displayName'], - 'labels': channel['labels'] - } - }) - - for index, policy in enumerate(policies): - resources.append({ - 'name': 'alerting-policy-{}-{}'.format(context.env['name'], index), - 'type': 'gcp-types/monitoring-v3:projects.alertPolicies', - 'properties': { - 'displayName': 
policy['name'], - 'documentation': { - 'content': policy['documentationContent'], - 'mimeType': policy['mimeType'] - }, - 'combiner': policy['combiner'], - 'enabled': policy['policyEnabled'], - 'conditions': get_policy_conditions(policy['conditions']), - 'notificationChannels': notification_channel_names - } - }) - - # Output variables: - output_props = [ - 'name', - 'type', - 'labels', - 'metricKind', - 'valueType', - 'unit', - 'description', - 'displayName', - 'metadata' - ] - - for outprop in output_props: - output = {} - if outprop in properties: - output['name'] = outprop - output['value'] = '$(ref.{}.{})'.format(resource_name, outprop) - outputs.append(output) - - return {'resources': resources, 'outputs': outputs} diff --git a/dm/templates/stackdriver_notification_channels/stackdriver_notification_channels.py.schema b/dm/templates/stackdriver_notification_channels/stackdriver_notification_channels.py.schema deleted file mode 100644 index ac0c992864b..00000000000 --- a/dm/templates/stackdriver_notification_channels/stackdriver_notification_channels.py.schema +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright 2019 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Stackdriver Notification Channels - author: Sourced Group Inc. - version: 1.0.0 - description: | - Supports creation of the Stackdriver notification channel and alert resources. 
- - For more information on this resource: - - https://cloud.google.com/monitoring/alerts/using-channels-api - - https://cloud.google.com/monitoring/alerts/using-alerting-api - - APIs endpoints used by this template: - - gcp-types/monitoring-v3:projects.notificationChannels => - https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannels - - gcp-types/monitoring-v3:projects.alertPolicies => - https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies - -required: - - notificationChannels - - policies - -properties: - notificationChannels: - type: array - minItems: 1 - description: | - A NotificationChannel is a medium through which an alert is delivered - when a policy violation is detected. Examples of channels include email, SMS, - and third-party messaging applications. Fields containing sensitive information - like authentication tokens or contact info are only partially populated on retrieval. - items: - type: object - additionalProperties: false - required: - - typeName - properties: - channelEnabled: - type: boolean - default: True - description: Whether or not the channel is enabled. - labels: - description: Configuration fields that define the channel and its behavior. - type: object - additionalProperties: true - verificationStatus: - description: Indicates whether this channel has been verified or not. - type: string - description: - description: An optional human-readable description of this notification channel. This may not exceed 1024 Unicode characters. - type: string - displayName: - type: string - description: An optional human-readable name for this notification channel. - typeName: - type: string - description: The type of the notification channel. - policies: - type: array - minItems: 1 - description: | - A description of the conditions under which some aspect of your system - is considered to be "unhealthy" and the ways to notify people or services about - this state. 
For an overview of alert policies, see Introduction to Alerting. - items: - type: object - additionalProperties: false - required: - - name - properties: - name: - description: | - A short name or phrase used to identify the policy in dashboards, - notifications, and incidents. To avoid confusion, don't use the same display - name for multiple policies in the same project. The name is limited to 512 - Unicode characters. - type: string - policyEnabled: - type: boolean - default: True - description: Whether or not the policy is enabled. - combiner: - description: | - The conditions are combined by AND or OR according - to the combiner field. If the combined conditions - evaluate to true, then an incident is created. - type: string - default: OR - conditions: - type: array - minItems: 1 - maxItems: 6 - description: A list of conditions for the policy. - documentationContent: - description: | - The text of the documentation, interpreted according to mime_type. - The content may not exceed 8,192 Unicode characters and may not exceed more - than 10,240 bytes when encoded in UTF-8 format, whichever is smaller. - type: string - mimeType: - description: Content type - type: string - default: text/markdown -outputs: - name: - type: string - description: The name of the Metric Descriptor. - type: - type: string - description: The metric type as defined in the descriptor. - labels: - type: array - description: List of the defined labels. - minItems: 0 - items: - type: string - metricKind: - type: string - description: The metric kind as defined. - valueType: - type: string - description: The measurement type (int, string, etc.). - unit: - type: string - description: The unit in which the metric value is reported. - description: - type: string - description: The metric description. - displayName: - type: string - description: The display name of the metric. - metadata: - type: object - additionalProperties: false - description: Metadata associated with the metric. 
- -documentation: - - templates/stackdriver_metric_descriptor/README.md - - - - - diff --git a/dm/templates/stackdriver_notification_channels/tests/integration/stackdriver_notification_channels.bats b/dm/templates/stackdriver_notification_channels/tests/integration/stackdriver_notification_channels.bats deleted file mode 100644 index b4465e78581..00000000000 --- a/dm/templates/stackdriver_notification_channels/tests/integration/stackdriver_notification_channels.bats +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - # DM config file name must be 61 chars or less. - # Must be a match of regex '[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?' - DEPLOYMENT_NAME=`echo $DEPLOYMENT_NAME | cut -c 1-61` - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables. 
- - export SLACK_CHANNEL_NAME="#slack-channel" - export SLACK_TOKEN="token-1234567890" - export SLACK_DISPLAY_NAME="name" - export SLACK_TYPE="slack" - export TEST_POLICY_NAME="1 - Availability - Cloud SQL Database - Memory usage (filtered) [MAX]" - export CONDITION_DISPLAY_NAME="CloudSQL Memory" - export CONDITION_FILTER="metric.type=\\\"cloudsql.googleapis.com/database/memory/usage\\\" resource.type=\\\"cloudsql_database\\\" resource.label.database_id=\\\"sql_instance_id\\\"" - export CONDITION_COMPARISON="COMPARISON_GT" - export CONDITION_DURATION="300s" - export CONDITION_THRESHOLD_VALUE=2750000000 - export CONDITION_TRIGGER_COUNT=1 - export CONDITION_AGGREGATION_ALIGNMENT_PERIOD="60s" - export CONDITION_AGGREGATION_ALIGNMENT_PER_SERIES="ALIGN_MAX" - export CONDITION_AGGREGATION_CROSS_SERIES_REDUCER="REDUCE_MEAN" - export CONDITION_AGGREGATION_GROUP_BY_FIELD="project" - export TEST_POLICY_DOCUMENTATION_CONTEXT="The janus rule \${condition.display_name} has generated this alert for the \${metric.display_name}." - - -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. 
-} - -########## TESTS ########## - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Check Slack notification channel configuration" { - run gcloud alpha monitoring channels list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "displayName: ${SLACK_DISPLAY_NAME}" ]] - [[ "$output" =~ "channel_name: '${SLACK_CHANNEL_NAME}'" ]] - [[ "$output" =~ "type: ${SLACK_TYPE}" ]] - [[ "$output" =~ "name: projects/${CLOUD_FOUNDATION_PROJECT_ID}/notificationChannels/" ]] -} - -@test "Check alert policies" { - run gcloud alpha monitoring policies list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "combiner: OR" ]] - [[ "$output" =~ "displayName: ${TEST_POLICY_NAME}" ]] - [[ "$output" =~ "alignmentPeriod: ${CONDITION_AGGREGATION_ALIGNMENT_PERIOD}" ]] - [[ "$output" =~ "crossSeriesReducer: ${CONDITION_AGGREGATION_CROSS_SERIES_REDUCER}" ]] - [[ "$output" =~ "perSeriesAligner: ${CONDITION_AGGREGATION_ALIGNMENT_PER_SERIES}" ]] - [[ "$output" =~ "comparison: ${CONDITION_COMPARISON}" ]] - [[ "$output" =~ "duration: ${CONDITION_DURATION}" ]] - [[ "$output" =~ "displayName: ${CONDITION_DISPLAY_NAME}" ]] - [[ "$output" =~ "count: ${CONDITION_TRIGGER_COUNT}" ]] -} - -@test "Deleting deployment ${DEPLOYMENT_NAME}" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] -} diff --git a/dm/templates/stackdriver_notification_channels/tests/integration/stackdriver_notification_channels.yaml b/dm/templates/stackdriver_notification_channels/tests/integration/stackdriver_notification_channels.yaml deleted file mode 100644 index 6d7d0d75952..00000000000 --- 
a/dm/templates/stackdriver_notification_channels/tests/integration/stackdriver_notification_channels.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Test of the Stackdriver Metric Descriptor template. - -########## NOTE ########################### -# These tests need to be run in the Stackdriver workspace. -# -# To list the current context: -# gcloud config list -# -# To switch context: -# gcloud config set project -# -########################################### - -imports: - - path: templates/stackdriver_notification_channels/stackdriver_notification_channels.py - name: stackdriver_notification_channels.py - -resources: - - name: my-notification-channels - type: stackdriver_notification_channels.py - properties: - notificationChannels: - - displayName: ${SLACK_DISPLAY_NAME} - typeName: ${SLACK_TYPE} - labels: - channel_name: "${SLACK_CHANNEL_NAME}" - auth_token: ${SLACK_TOKEN} - policies: - - name: ${TEST_POLICY_NAME} - documentationContent: "${TEST_POLICY_DOCUMENTATION_CONTEXT}" - conditions: - - displayName: ${CONDITION_DISPLAY_NAME} - filter: "${CONDITION_FILTER}" - comparison: ${CONDITION_COMPARISON} - duration: ${CONDITION_DURATION} - thresholdValue: ${CONDITION_THRESHOLD_VALUE} - trigger: - count: ${CONDITION_TRIGGER_COUNT} - aggregations: - - alignmentPeriod: ${CONDITION_AGGREGATION_ALIGNMENT_PERIOD} - perSeriesAligner: ${CONDITION_AGGREGATION_ALIGNMENT_PER_SERIES} - crossSeriesReducer: ${CONDITION_AGGREGATION_CROSS_SERIES_REDUCER} - groupByFields: - - ${CONDITION_AGGREGATION_GROUP_BY_FIELD} - diff --git a/dm/templates/target_proxy/README.md b/dm/templates/target_proxy/README.md deleted file mode 100644 index 941991ca47b..00000000000 --- a/dm/templates/target_proxy/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# Target Proxy - -This template creates one of the following target proxy resources (depending on the parameters): - -- targetHttpProxy -- targetHttpsProxy -- targetTcpProxy -- targetSslProxy - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) 
-- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.loadBalancerAdmin](https://cloud.google.com/compute/docs/access/iam) - IAM role to the Deployment Manager service account -- To use the target TCP Proxy, request access to the Compute ALPHA features from the - Cloud [Support](https://cloud.google.com/support/) - -## Deployment - -### Resources - -- [compute.v1.targetHttpProxy](https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies) -- [compute.v1.targetHttpsProxy](https://cloud.google.com/compute/docs/reference/latest/targetHttpsProxies) -- [compute.alpha.targetTcpProxy](https://www.googleapis.com/discovery/v1/apis/compute/alpha/rest) -- [compute.v1.targetSslProxy](https://cloud.google.com/compute/docs/reference/latest/targetSslProxies) - -### Properties - -See the `properties` section in the schema file(s): - -- [Target Proxy](target_proxy.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this - case, [examples/target\_proxy.yaml](examples/target_proxy.yaml): - -```shell - cp templates/target_proxy/examples/target_proxy.yaml \ - my_target_proxy.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for - properties, refer to the schema files listed above): - -```shell - vim my_target_proxy.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace \ with the relevant - deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_target_proxy.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Target HTTP Proxy](examples/target_proxy_http.yaml) -- [Target HTTPS Proxy](examples/target_proxy_https.yaml) -- [Target TCP Proxy](examples/target_proxy_tcp.yaml) -- [Target SSL Proxy](examples/target_proxy_ssl.yaml) diff --git a/dm/templates/target_proxy/examples/target_proxy_http.yaml b/dm/templates/target_proxy/examples/target_proxy_http.yaml deleted file mode 100644 index 99dd42cf7f1..00000000000 --- a/dm/templates/target_proxy/examples/target_proxy_http.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Example of the target proxy template usage. -# -# Replace the placeholder with a valid -# URL of the UrlMap resource. - -imports: - - path: templates/target_proxy/target_proxy.py - name: target_proxy.py - -resources: - - name: target-http-proxy - type: target_proxy.py - properties: - protocol: HTTP - target: diff --git a/dm/templates/target_proxy/examples/target_proxy_https.yaml b/dm/templates/target_proxy/examples/target_proxy_https.yaml deleted file mode 100644 index de26c69e96c..00000000000 --- a/dm/templates/target_proxy/examples/target_proxy_https.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Example of the target proxy template usage. -# -# Replace the following placeholders with valid values: -# : a URL of the UrlMap resource -# : a URL of the SslCertificate resource - -imports: - - path: templates/target_proxy/target_proxy.py - name: target_proxy.py - -resources: - - name: target-https-proxy - type: target_proxy.py - properties: - protocol: HTTPS - target: - ssl: - certificate: - url: diff --git a/dm/templates/target_proxy/examples/target_proxy_ssl.yaml b/dm/templates/target_proxy/examples/target_proxy_ssl.yaml deleted file mode 100644 index 36067f0aa50..00000000000 --- a/dm/templates/target_proxy/examples/target_proxy_ssl.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Example of the target proxy template usage. 
-# -# Replace the following placeholders with valid values: -# : a URL of the BackendService resource -# : contents of the certificate file -# : contents of the private key file - -imports: - - path: templates/target_proxy/target_proxy.py - name: target_proxy.py - -resources: - - name: target-ssl-proxy - type: target_proxy.py - properties: - protocol: SSL - target: - ssl: - certificate: - certificate: - privateKey: diff --git a/dm/templates/target_proxy/examples/target_proxy_tcp.yaml b/dm/templates/target_proxy/examples/target_proxy_tcp.yaml deleted file mode 100644 index b4ecdc2907f..00000000000 --- a/dm/templates/target_proxy/examples/target_proxy_tcp.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Example of the target proxy template usage. -# -# Replace the placeholder with -# a valid URL of the BackendService resource. - -imports: - - path: templates/target_proxy/target_proxy.py - name: target_proxy.py - -resources: - - name: target-http-proxy - type: target_proxy.py - properties: - protocol: TCP - target: diff --git a/dm/templates/target_proxy/target_proxy.py b/dm/templates/target_proxy/target_proxy.py deleted file mode 100644 index d98079c5bd3..00000000000 --- a/dm/templates/target_proxy/target_proxy.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates HTTP(S) and TCP/SSL proxy resources. 
""" - -import copy - -HTTP_BASE = True -TCP_BASE = False - - -def set_optional_property(destination, source, prop_name): - """ Copies the property value if present. """ - - if prop_name in source: - destination[prop_name] = source[prop_name] - - -def get_certificate(properties, project_id, res_name): - """ - Gets a link to an existing or newly created SSL Certificate - resource. - """ - - if 'url' in properties: - return properties['url'], [], [] - - name = '{}-ssl-cert'.format(res_name) - - resource = { - 'name': name, - 'type': 'ssl_certificate.py', - 'properties': copy.copy(properties) - } - resource['properties']['name'] = properties.get('name', name) - resource['properties']['project'] = project_id - - self_link = '$(ref.{}.selfLink)'.format(name) - outputs = [ - { - 'name': 'certificateName', - 'value': '$(ref.{}.name)'.format(name) - }, - { - 'name': 'certificateSelfLink', - 'value': self_link - } - ] - - return self_link, [resource], outputs - - -def get_insecure_proxy(is_http, res_name, project_id, properties, optional_properties): - """ Creates a TCP or HTTP Proxy resource. """ - - if is_http: - # https://cloud.google.com/compute/docs/reference/rest/v1/targetHttpProxies - type_name = 'gcp-types/compute-v1:targetHttpProxies' - target_prop = 'urlMap' - else: - # https://cloud.google.com/compute/docs/reference/rest/v1/targetTcpProxies - type_name = 'gcp-types/compute-v1:targetTcpProxies' - target_prop = 'service' - - resource_props = { - 'name': properties.get('name', res_name), - 'project': project_id, - } - resource = {'type': type_name, 'name': res_name, 'properties': resource_props} - - resource_props[target_prop] = properties['target'] - - for prop in optional_properties: - set_optional_property(resource_props, properties, prop) - - return [resource], [] - - -def get_secure_proxy(is_http, res_name, project_id, properties, optional_properties): - """ Creates an SSL or HTTPS Proxy resource. 
""" - - if is_http: - create_base_proxy = get_http_proxy - # https://cloud.google.com/compute/docs/reference/rest/v1/targetHttpsProxies - target_type = 'gcp-types/compute-v1:targetHttpsProxies' - else: - create_base_proxy = get_tcp_proxy - # https://cloud.google.com/compute/docs/reference/rest/v1/targetSslProxies - target_type = 'gcp-types/compute-v1:targetSslProxies' - - # Base proxy settings: - resources, outputs = create_base_proxy(properties, res_name, project_id) - resource = resources[0] - resource['type'] = target_type - resource_prop = resource['properties'] - for prop in optional_properties: - set_optional_property(resource_prop, properties, prop) - - # SSL settings: - ssl_resources = [] - ssl_outputs = [] - if 'sslCertificates' not in properties.get('ssl', []): - ssl = properties['ssl'] - url, ssl_resources, ssl_outputs = get_certificate(ssl['certificate'], project_id, res_name) - resource_prop['sslCertificates'] = [url] - set_optional_property(resource_prop, ssl, 'sslPolicy') - - if 'sslCertificates' in properties.get('ssl', []): - set_optional_property(resource_prop, properties['ssl'], 'sslCertificates') - - return resources + ssl_resources, outputs + ssl_outputs - - -def get_http_proxy(properties, res_name, project_id): - """ Creates the HTTP Proxy resource. """ - - return get_insecure_proxy(HTTP_BASE, res_name, project_id, properties, ['description']) - - -def get_tcp_proxy(properties, res_name, project_id): - """ Creates the TCP Proxy resource. """ - - optional_properties = ['description', 'proxyHeader'] - return get_insecure_proxy(TCP_BASE, res_name, project_id, properties, optional_properties) - - -def get_https_proxy(properties, res_name, project_id): - """ Creates the HTTPS Proxy resource. """ - - return get_secure_proxy(HTTP_BASE, res_name, project_id, properties, ['quicOverride']) - - -def get_ssl_proxy(properties, res_name, project_id): - """ Creates the SSL Proxy resource. 
""" - - return get_secure_proxy(TCP_BASE, res_name, project_id, properties, []) - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - protocol = properties['protocol'] - - if protocol == 'SSL': - resources, outputs = get_ssl_proxy(properties, context.env['name'], project_id) - elif protocol == 'TCP': - resources, outputs = get_tcp_proxy(properties, context.env['name'], project_id) - elif protocol == 'HTTPS': - resources, outputs = get_https_proxy(properties, context.env['name'], project_id) - else: - resources, outputs = get_http_proxy(properties, context.env['name'], project_id) - - return { - 'resources': - resources, - 'outputs': - outputs + [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - }, - { - 'name': 'kind', - 'value': '$(ref.{}.kind)'.format(context.env['name']) - }, - ] - } diff --git a/dm/templates/target_proxy/target_proxy.py.schema b/dm/templates/target_proxy/target_proxy.py.schema deleted file mode 100644 index 7259ab13990..00000000000 --- a/dm/templates/target_proxy/target_proxy.py.schema +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Target Proxy - author: Sourced Group Inc. 
- version: 1.0.1 - description: | - Depending on the configuration, supports creation of one of these proxy - resources: - - targetHttpProxy - - targetHttpsProxy - - targetTcpPProxy - - targetSslProxy - - For more information on this resource: - https://cloud.google.com/load-balancing/docs/target-proxies - - APIs endpoints used by this template: - - gcp-types/compute-v1:targetSslProxies => - https://cloud.google.com/compute/docs/reference/rest/v1/targetSslProxies - - gcp-types/compute-v1:targetHttpProxies => - https://cloud.google.com/compute/docs/reference/rest/v1/targetHttpProxies - - gcp-types/compute-v1:targetHttpsProxies => - https://cloud.google.com/compute/docs/reference/rest/v1/targetHttpsProxies - - gcp-types/compute-v1:targetTcpProxies => - https://cloud.google.com/compute/docs/reference/rest/v1/targetTcpProxies - -imports: - - path: ../ssl_certificate/ssl_certificate.py - name: ssl_certificate.py - -additionalProperties: false - -required: - - target - - protocol - -properties: - name: - type: string - description: | - Must comply with RFC1035. Specifically, the name must be 1-63 characters long and match - the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, - and all following characters must be a dash, lowercase letter, or digit, except the last character, - which cannot be a dash. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing resources. The - Google apps domain is prefixed if applicable. - description: - type: string - description: The resource description (optional). - protocol: - type: string - description: The protocol type for the resource to proxy. - enum: - - HTTP - - HTTPS - - TCP - - SSL - target: - type: string - description: | - The URL to the BackendService resource (for the TCP and SSL - protocols). 
The URL to the UrlMap resource that defines the mapping from - URL to the BackendService (for theHTTP and HTTPS protocols). - ssl: - type: object - description: | - Encryption settings for connections processed by the resource. Used for - HTTPS and SSL proxies only. - oneOf: - - required: - - sslCertificates - - required: - - certificate - properties: - sslCertificates: - type: array - uniqItems: true - description: | - URLs to SslCertificate resources that are used to authenticate connections to Backends. - At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates. - - Authorization requires the following Google IAM permission on the specified resource sslCertificates: - - compute.sslCertificates.get - minItems: 0 - maxItems: 15 - items: - type: string - certificate: - type: object - description: SSL certificate settings. - oneOf: - - required: - - url - - required: - - privateKey - - certificate - properties: - url: - type: string - description: The URL of an existing SSL certificate resource. - name: - type: string - description: The name of the SSL certificate resource. - description: - type: string - description: | - The description of the SSL certificate resource (optional). - privateKey: - type: string - description: The write-only private key in the PEM format. - certificate: - type: string - description: | - The local certificate file. The certificate must be in the PEM - format. The certificate chain must be no greater than 5 certs - long. The chain must include at least one intermediate cert. - sslPolicy: - type: string - description: | - The URL of the SslPolicy resource to be associated with this - resource. If not set, the proxy resource will have no SSL policy - configured. - proxyHeader: - type: string - default: NONE - description: | - The type of the proxy header to append before sending data to the - backend: NONE or PROXY_V1. The default is NONE. Used for the TCP and - SSL proxies only. 
- enum: - - NONE - - PROXY_V1 - quicOverride: - type: string - default: NONE - description: | - The QUIC override policy for the proxy resource. Determines whether - the load balancer will attempt to negotiate QUIC with clients. The values - are: NONE, ENABLE, or DISABLE. Enables QUIC when set to ENABLE; disables - QUIC when set to DISABLE; uses the QUIC policy with no user overrides - when set to NONE. If no value provided, defaults to NONE. Used for HTTPS - proxies only. - enum: - - NONE - - ENABLE - - DISABLE - -outputs: - name: - type: string - description: The resource name. - selfLink: - type: string - description: The URI (SelfLink) of the URL target proxy resource. - kind: - type: string - description: The resource kind. - certificateName: - type: string - description: The name of the SSL certificate, if one is to be created. - certificateSelfLink: - type: string - description: | - The URI (SelfLink) of the SSL certificate, if one is to be created. - -documentation: - - templates/target_proxy/README.md - -examples: - - templates/target_proxy/examples/target_proxy_http.yaml - - templates/target_proxy/examples/target_proxy_https.yaml - - templates/target_proxy/examples/target_proxy_tcp.yaml - - templates/target_proxy/examples/target_proxy_ssl.yaml diff --git a/dm/templates/target_proxy/tests/integration/target_proxy.bats b/dm/templates/target_proxy/tests/integration/target_proxy.bats deleted file mode 100755 index f8f290eeec8..00000000000 --- a/dm/templates/target_proxy/tests/integration/target_proxy.bats +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! 
-e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables. - export HTTPS_RES_NAME="https-proxy-${RAND}" - export URL_MAP_RES_NAME="url-map-${RAND}" - export HTTPS_QUIC_OVERRIDE="ENABLE" - export SSL_RES_NAME="ssl-proxy-${RAND}" - export SSL_NAME="ssl-proxy-name-${RAND}" - export SSL_DESCRIPTION="ssl-proxy-description-${RAND}" - export SSL_BS_RES_NAME="ssl-backend-service-${RAND}" - export PROXY_HEADER="PROXY_V1" - export SSL_CERT_NAME="ssl-certificate-${RAND}" - export SSL_POLICY_NAME="ssl-policy-${RAND}" - export HTTP_RES_NAME="http-proxy-${RAND}" - export HTTP_NAME="https-proxy-name-${RAND}" - export HTTP_DESCRIPTION="http-proxy-description-${RAND}" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - gcloud compute ssl-policies create "${SSL_POLICY_NAME}" \ - --profile MODERN --min-tls-version 1.2 \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. 
- if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - gcloud compute ssl-policies delete "${SSL_POLICY_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying HTTP proxy" { - run gcloud compute target-http-proxies describe "${HTTP_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "description: ${HTTP_DESCRIPTION}" ]] - [[ "$output" =~ "${URL_MAP_RES_NAME}" ]] -} - -@test "Verifying HTTPS proxy" { - run gcloud compute target-https-proxies describe "${HTTPS_RES_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "quicOverride: ${HTTPS_QUIC_OVERRIDE}" ]] - [[ "$output" =~ "${URL_MAP_RES_NAME}" ]] - [[ "$output" =~ "${SSL_CERT_NAME}" ]] -} - -@test "Verifying HTTPS proxy sslCertificates" { - run gcloud compute target-https-proxies describe "${HTTPS_RES_NAME}-2" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "quicOverride: ${HTTPS_QUIC_OVERRIDE}" ]] - [[ "$output" =~ "${URL_MAP_RES_NAME}" ]] - [[ "$output" =~ "${SSL_CERT_NAME}" ]] -} - - -@test "Verifying SSL proxy" { - run gcloud compute target-ssl-proxies describe "${SSL_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "description: ${SSL_DESCRIPTION}" ]] - [[ "$output" =~ "proxyHeader: ${PROXY_HEADER}" ]] - [[ "$output" =~ "${SSL_CERT_NAME}" ]] - [[ "$output" =~ "${SSL_POLICY_NAME}" ]] - [[ "$output" =~ "${SSL_BS_RES_NAME}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - diff 
--git a/dm/templates/target_proxy/tests/integration/target_proxy.yaml b/dm/templates/target_proxy/tests/integration/target_proxy.yaml deleted file mode 100644 index e31890425ed..00000000000 --- a/dm/templates/target_proxy/tests/integration/target_proxy.yaml +++ /dev/null @@ -1,162 +0,0 @@ -# Test of the target proxy template. - -imports: - - path: templates/target_proxy/target_proxy.py - name: target_proxy.py - -resources: - - name: test-proxy - type: target_proxy.py - properties: - name: ${HTTPS_RES_NAME} - protocol: HTTPS - target: $(ref.${URL_MAP_RES_NAME}.selfLink) - quicOverride: ${HTTPS_QUIC_OVERRIDE} - ssl: - certificate: - url: $(ref.${SSL_RES_NAME}.certificateSelfLink) - - - name: test-proxy-2 - type: target_proxy.py - properties: - name: ${HTTPS_RES_NAME}-2 - protocol: HTTPS - target: $(ref.${URL_MAP_RES_NAME}.selfLink) - quicOverride: ${HTTPS_QUIC_OVERRIDE} - ssl: - sslCertificates: - - $(ref.${SSL_RES_NAME}.certificateSelfLink) - - - name: ${SSL_RES_NAME} - type: target_proxy.py - properties: - name: ${SSL_NAME} - description: ${SSL_DESCRIPTION} - protocol: SSL - target: $(ref.${SSL_BS_RES_NAME}.selfLink) - proxyHeader: ${PROXY_HEADER} - ssl: - sslPolicy: https://www.googleapis.com/compute/v1/projects/${CLOUD_FOUNDATION_PROJECT_ID}/global/sslPolicies/${SSL_POLICY_NAME} - certificate: - name: ${SSL_CERT_NAME} - certificate: | - -----BEGIN CERTIFICATE----- - MIIDODCCAiACCQCqBGuEeBXJTjANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQGEwJD - QTEQMA4GA1UECAwHT250YXJpbzEQMA4GA1UEBwwHVG9yb250bzEVMBMGA1UECgwM - RXhhbXBsZSBPcmcuMRQwEgYDVQQDDAtleGFtcGxlLmNvbTAeFw0xODEwMTEyMDEy - MjVaFw0xOTEwMTEyMDEyMjVaMF4xCzAJBgNVBAYTAkNBMRAwDgYDVQQIDAdPbnRh - cmlvMRAwDgYDVQQHDAdUb3JvbnRvMRUwEwYDVQQKDAxFeGFtcGxlIE9yZy4xFDAS - BgNVBAMMC2V4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC - AQEA5yLVBWSqKRWSJrGh2nbW0j+Soy+uvDKCjSZtXIVIcWvSP+WXd6RE2zlsNee4 - UA90Jjhx6Si2MxraCNblm4MJSQg46irBn4Vmex/2iY2vD1MbMEusTNIfpDh3A27H - qdBe+WSO0uHXfmgC68M8mgTMcMLVoUsGgv8B710qBN1D9EKPZdEhYBAEhC1RFj+8 - 
o3krWa983QFVND4PeFJSZe0LxAg44/bHVFNKK5Ub+TPa9P3t86SpoNj9/M7DPSkh - AtqmHL+90G0gvkfvtaoNRHHdqk2X+Uz3cUXWgl8xE+X5wcWu7r3cMxPaJvuLldJa - SIcc4YIlKOB0nFDKPK+iy7LqPwIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQAdf4dG - IkEmdNmTeGPVOUis+7ziWzRPxt8Vpmuq24z4H9mIkAPo/2aLpMKH7bloeYvz8blu - 5VQZx7StoE5Sde1ss/AoaL1dVJi/dgmeN2cHy7J6POu3e9n6yXGiIh0qHlFe83nJ - RVIqtN9QGGuabGt3WGbKElMKwrCl9NGhExi/LntPFllXfTLb2pVGXH47ZihynbUj - 4S21+KnQUPjhg6Na6hP3qLVqSYtWataJFpy6DOG1wgoAWjagNc3ltGdmv6O/ZkI3 - 3vymENyn8G7n+Z1knXUXxv4rJoeiYbZ7/2/bQ8BTc/RI5qnhzO8VYmyAZrrKGZnD - W2xuikK4nQHsideP - -----END CERTIFICATE----- - privateKey: | - -----BEGIN RSA PRIVATE KEY----- - MIIEowIBAAKCAQEA5yLVBWSqKRWSJrGh2nbW0j+Soy+uvDKCjSZtXIVIcWvSP+WX - d6RE2zlsNee4UA90Jjhx6Si2MxraCNblm4MJSQg46irBn4Vmex/2iY2vD1MbMEus - TNIfpDh3A27HqdBe+WSO0uHXfmgC68M8mgTMcMLVoUsGgv8B710qBN1D9EKPZdEh - YBAEhC1RFj+8o3krWa983QFVND4PeFJSZe0LxAg44/bHVFNKK5Ub+TPa9P3t86Sp - oNj9/M7DPSkhAtqmHL+90G0gvkfvtaoNRHHdqk2X+Uz3cUXWgl8xE+X5wcWu7r3c - MxPaJvuLldJaSIcc4YIlKOB0nFDKPK+iy7LqPwIDAQABAoIBACHHm25gWeYLOwLg - rxDokVjE5b9ntpfWofHTDeSZrg61fVLNUSexIEcRy1jNdshsmcMEONGkm4w4fmkQ - Txo9OlwsEXVXrliL/IA+GZ/czxrkQHL8fD/17Z3oiqw7wn5074xvP9heHUpiFRsz - u2WfEeng76vU9Syr7DJ5YSy40beew48gJsfclbbAWl1GQ4s1wluoMlutCWjRKSAD - Qg1pjJQuTHDpD+PNgHrx4Xbyjyo6tGqcdt9B0crhuxwTZXUogQsRKRuMHlxxBsbm - kINhSsNf8V8iRCBtZ4FPWcq+Rk/KntNzB9NZQFmrH5hS0oQmZjzNAyzCXIaYTji7 - Ju8XDaECgYEA9hUDBBwniphLZxvIC8GHVgAFx76Xw085bksVI0jNl2yG1HgNjCNA - W7DXJnyAtJZQjaItZfvB/tMm/ZAypf77tnru2n/uRvB4uG1Yh7RSy7rhLpibvTpU - e+DHm2c6kVW6Ng4q6rFxaunpjKEaeZO8pKowUu4YGU9YaSqvIGwoPFcCgYEA8HOc - 1J5Rop56BPvJgozqQRRQ3Q3AFfzlyYEniF35twIqnehemU3nJMdVp9jbZizOcrmu - ZBma5c5P5Bjgam3SQvswTUxmbIZ2VvvXOv5aPeldNdFHrADpVmOdKwcPxQ8qx+IT - GK3rrVRkH6+JByseHhxl3igIM7fAtbd27ENDkFkCgYEA9YmhqMgu7CtpkUg3IwPH - dhgvrE6QP2EdfN+OB9bszNqM7hOb8Oh7nwGkq9Iu2gHh/nCDu+6ocwtdLERlRRxX - LI0dJwffSQlIaz0vyLg0pPOjHEtJmlZJVhHDGVy3I6zWUHlyeRr0gClFz/wv3n97 - CxKFhTns8dQp80WT2FYTD6ECgYBU0KMYSIQJNZda3Km22BflPtJLNwdzehJf4qPc - 
MTHdQPFhY87Cir0mtv1ayF6TiuiDhUWjX3jI6N47Wh8Gy5goMkxWZ8WVMFTb19eS - opeYURGk4x5B6MxlwZt1yvbgDrqLaQ5NXUPNjwAGQTe3hJkKDABOvZYvD/j04DMd - oZhaeQKBgGGgnxTTUTEdqZ/AsVD0NmaqauTmyjsUpmAph9oazERM729n9igob85z - KXQmD9gmtTrCuv8LGyEPFsIhlBTOlLyzHpMhI2Hd23hzQp8v09ZdDpx8SqHv0THW - y8YMreKih6+reSfC+GuOzQoKi4vTKO7wwuXYysXkg3juupqZ7Kab - -----END RSA PRIVATE KEY----- - - - name: ${HTTP_RES_NAME} - type: target_proxy.py - properties: - name: ${HTTP_NAME} - description: ${HTTP_DESCRIPTION} - protocol: HTTP - target: $(ref.${URL_MAP_RES_NAME}.selfLink) - -# Test prerequisites: - - name: ${URL_MAP_RES_NAME} - type: compute.v1.urlMap - properties: - defaultService: $(ref.http-backend-service-${RAND}.selfLink) - - - name: ${SSL_BS_RES_NAME} - type: compute.v1.backendService - properties: - loadBalancingScheme: EXTERNAL - protocol: SSL - backends: - - group: $(ref.instance-group-manager-${RAND}.instanceGroup) - healthChecks: - - $(ref.ssl-health-check-${RAND}.selfLink) - - - name: http-backend-service-${RAND} - type: compute.v1.backendService - properties: - loadBalancingScheme: EXTERNAL - protocol: HTTP - backends: - - group: $(ref.instance-group-manager-${RAND}.instanceGroup) - healthChecks: - - $(ref.http-health-check-${RAND}.selfLink) - - - name: instance-group-manager-${RAND} - type: compute.v1.instanceGroupManager - properties: - instanceTemplate: $(ref.instance-template-${RAND}.selfLink) - zone: us-east1-c - targetSize: 1 - namedPorts: - - name: http - port: 80 - - - name: instance-template-${RAND} - type: compute.v1.instanceTemplate - properties: - properties: - machineType: f1-micro - disks: - - autoDelete: true - boot: true - deviceName: boot - initializeParams: - sourceImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - type: PERSISTENT - networkInterfaces: - - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - network: global/networks/default - - - name: http-health-check-${RAND} - type: compute.v1.httpHealthCheck - - - name: 
ssl-health-check-${RAND} - type: compute.v1.healthCheck - properties: - type: SSL - sslHealthCheck: - port: 443 diff --git a/dm/templates/unmanaged_instance_group/README.md b/dm/templates/unmanaged_instance_group/README.md deleted file mode 100644 index a271b27af86..00000000000 --- a/dm/templates/unmanaged_instance_group/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Unmanaged Instance Group - -This template creates a unmanaged instance group. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, setup billing, enable requisite APIs](../project/README.md) -- Grant the [compute.admin](https://cloud.google.com/compute/docs/access/iam) IAM role to the [Deployment Manager service account](https://cloud.google.com/deployment-manager/docs/access-control#access_control_for_deployment_manager) - -## Deployment - -### Resources - -- [compute.v1.instanceGroups](https://cloud.google.com/compute/docs/reference/latest/instanceGroups) - -### Properties - -See the `properties` section in the schema file(s): - -- [Unmanaged Instance Group](unmanaged_instance_group.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit) - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment, in this - case [examples/unmanaged\_instance\_group\_add\_instance.yaml](examples/unmanaged_instance_group_add_instance.yaml) - -```shell - cp templates/unmanaged_instance_group/examples/unmanaged_instance_group_add_instance.yaml \ - my_unmanaged_instance_group_add_instance.yaml -``` - -4. Change the values in the config file to match your specific GCP setup. - Refer to the properties in the schema files described above. 
- -```shell - vim my_unmanaged_instance_group_add_instance.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment as described below, replacing - \ with your with your own deployment name - -```shell - gcloud deployment-manager deployments create \ - --config my_unmanaged_instance_group_add_instance.yaml -``` - -6. In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [Unmanaged Instance Group](examples/unmanaged_instance_group.yaml) diff --git a/dm/templates/unmanaged_instance_group/examples/unmanaged_instance_group_add_instance.yaml b/dm/templates/unmanaged_instance_group/examples/unmanaged_instance_group_add_instance.yaml deleted file mode 100644 index e5acf100368..00000000000 --- a/dm/templates/unmanaged_instance_group/examples/unmanaged_instance_group_add_instance.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Example of the Unmanaged Instance Group template usage. -# -# In this example, a simple unmanaged instance group is created and 2 instances added. - -imports: - - path: templates/unmanaged_instance_group/unmanaged_instance_group.py - name: unmanaged_instance_group.py - -resources: - - name: unmanaged-instance-group-example - type: unmanaged_instance_group.py - properties: - project: - name: - zone: \ No newline at end of file diff --git a/dm/templates/unmanaged_instance_group/examples/unmanaged_instance_group_remove_instance.yaml b/dm/templates/unmanaged_instance_group/examples/unmanaged_instance_group_remove_instance.yaml deleted file mode 100644 index 282450a5b66..00000000000 --- a/dm/templates/unmanaged_instance_group/examples/unmanaged_instance_group_remove_instance.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Example of the Unmanaged Instance Group template usage. -# -# In this example, 1 instance removed from unmanaged instance group, created in -# `unmanaged_instance_group_add_instance.yaml` example. 
- -imports: - - path: templates/unmanaged_instance_group/unmanaged_instance_group.py - name: unmanaged_instance_group.py - -resources: - - name: unmanaged-instance-group-example - type: unmanaged_instance_group.py - properties: - project: - name: - zone: diff --git a/dm/templates/unmanaged_instance_group/tests/integration/unmanaged_instance_group.bats b/dm/templates/unmanaged_instance_group/tests/integration/unmanaged_instance_group.bats deleted file mode 100755 index c5f226e22bc..00000000000 --- a/dm/templates/unmanaged_instance_group/tests/integration/unmanaged_instance_group.bats +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - export UMIG_NAME="umig-${RAND}" - export UMIG_RES_NAME="umig-${RAND}" - export ZONE="us-central1-c" - export PORT_NAME="http" - export PORT="80" - export INSTANCE_NAME="test-umig-instance-${RAND}" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < templates/unmanaged_instance_group/tests/integration/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. 
- if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - # Needed for testing resource creation with preexisting (not referenced) - # instance - gcloud compute instances create "${INSTANCE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --zone "${ZONE}" - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - rm -f "${RANDOM_FILE}" - gcloud compute instances delete "${INSTANCE_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --zone "${ZONE}" -q - delete_config - fi - - # Per-test teardown steps. -} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config "${CONFIG}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "$output" - [[ "$status" -eq 0 ]] -} - -@test "Verifying that unmanaged intance group was created" { - run gcloud compute instance-groups unmanaged list \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - echo "$output" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${UMIG_NAME}" ]] - [[ "$output" =~ "${ZONE}" ]] -} - -@test "Verifying unmanaged instance group properties" { - run gcloud compute instance-groups unmanaged describe "${UMIG_NAME}" \ - --zone "${ZONE}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "instanceGroups/${UMIG_NAME}" ]] - [[ "$output" =~ "name: ${PORT_NAME}" ]] - [[ "$output" =~ "port: ${PORT}" ]] - [[ "$output" =~ "size: 1" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - [[ "$status" -eq 0 ]] -} - diff --git a/dm/templates/unmanaged_instance_group/tests/integration/unmanaged_instance_group.yaml b/dm/templates/unmanaged_instance_group/tests/integration/unmanaged_instance_group.yaml deleted file mode 100644 index 90a39c2dc68..00000000000 --- 
a/dm/templates/unmanaged_instance_group/tests/integration/unmanaged_instance_group.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Test of the Unanaged Instance Group template. - -imports: - - path: templates/unmanaged_instance_group/unmanaged_instance_group.py - name: unmanaged_instance_group.py - -resources: - - name: ${UMIG_RES_NAME} - type: unmanaged_instance_group.py - properties: - name: ${UMIG_NAME} - zone: ${ZONE} - namedPorts: - - name: ${PORT_NAME} - port: ${PORT} - -# Test prerequisites: -# - name: ${INSTANCE_NAME} -# type: compute.v1.instances -# properties: -# zone: ${ZONE} diff --git a/dm/templates/unmanaged_instance_group/unmanaged_instance_group.py b/dm/templates/unmanaged_instance_group/unmanaged_instance_group.py deleted file mode 100644 index 870f3dafef1..00000000000 --- a/dm/templates/unmanaged_instance_group/unmanaged_instance_group.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2019 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates an unmanaged instance group. """ - -def set_optional_property(receiver, source, property_name): - """ If set, copies the given property value from one object to another. """ - - if property_name in source: - receiver[property_name] = source[property_name] - - -def generate_instance_url(project, zone, instance): - """ Format the resource name as a resource URI. """ - - is_self_link = '/' in instance or '.' 
in instance - - if is_self_link: - instance_url = instance - else: - instance_url = 'projects/{}/zones/{}/instances/{}' - instance_url = instance_url.format(project, zone, instance) - - return instance_url - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - zone = properties.get('zone') - - # Network formatting - if 'network' in properties: - network_name = properties.get('network') - if not '/' in network_name: - network_propertie = { 'network': 'global/networks/{}'.format(network_name) } - else: - network_propertie = { 'network': network_name } - else: - network_propertie = {} - - properties.update(network_propertie) - - # Create unmanaged instance group resource - umig_properties = { - 'name': name, - 'project': project_id, - 'zone': zone - } - - known_properties = [ - 'description', - 'namedPorts', - 'region', - 'network', - ] - - for prop in known_properties: - set_optional_property(umig_properties, properties, prop) - - umig_resources = [ - { - 'name': name, - 'type': 'gcp-types/compute-v1:instanceGroups', - 'properties': umig_properties - }, - ] - - # If instances are specified, add/remove them to/from the group. 
- add_instances_resources = [] - remove_instances_resources = [] - instances = properties.get('instances', { - 'add': [], - 'delete': [] - }) - - # Generate outputs - umig_outputs = [ - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(name) - }, - { - 'name': 'name', - 'value': '$(ref.{}.name)'.format(name) - }, - { - 'name': 'zone', - 'value': '$(ref.{}.zone)'.format(name) - } - ] - - return { - 'resources': umig_resources + add_instances_resources + remove_instances_resources, - 'outputs': umig_outputs - } diff --git a/dm/templates/unmanaged_instance_group/unmanaged_instance_group.py.schema b/dm/templates/unmanaged_instance_group/unmanaged_instance_group.py.schema deleted file mode 100644 index d0000a76711..00000000000 --- a/dm/templates/unmanaged_instance_group/unmanaged_instance_group.py.schema +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2019 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: Unmanaged Instance Group - author: Google Cloud PSO and partners - version: 1.2.0 - description: | - Creates a unmanaged instance group. 
- - For more information on this resource: - https://cloud.google.com/compute/docs/instance-groups/ - - APIs endpoints used by this template: - - gcp-types/compute-v1:instanceGroups => - https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroups - - gcp-types/compute-v1:compute.instanceGroups.addInstances => - https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroups/addInstances - -additionalProperties: false - -required: - - zone - -properties: - name: - type: string - description: The name of the unmanaged instance group. Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the instance. - description: - type: string - description: An optional description of the resource. - zone: - type: string - description: | - The name of the zone where the unmanaged instance group is located. - namedPorts: - type: array - uniqueItems: true - description: | - A list of the named ports configured for the instance groups - complementary to the Instance Group Manager. - items: - type: object - additionalProperties: false - required: - - name - - port - properties: - name: - type: string - description: The port name. - port: - type: integer - minimum: 1 - maximum: 65535 - description: The port number. - network: - type: string - description: | - The URL of the network to which all instances in the instance group belong. - -outputs: - selfLink: - type: string - description: The URL (SelfLink) of the unmanaged instance group resource. - name: - type: string - description: The name of the unmanaged instance group resource. - zone: - type: string - description: | - The name of the zone where the unmanaged instance group is located. 
- -documentation: - - templates/unmanaged_instance_group/README.md - -examples: - - templates/unmanaged_instance_group/examples/unmanaged_instance_group.yaml diff --git a/dm/templates/url_map/README.md b/dm/templates/url_map/README.md deleted file mode 100644 index ca1bf5c36d3..00000000000 --- a/dm/templates/url_map/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# URL Map - -This template creates a URL map. - -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Grant the [compute.networkAdmin](https://cloud.google.com/compute/docs/access/iam) - or [compute.loadBalancerAdmin](https://cloud.google.com/compute/docs/access/iam) - IAM role to the Deployment Manager service account - -## Deployment - -### Resources - -- [compute.v1.urlMap](https://cloud.google.com/compute/docs/reference/rest/v1/urlMaps) - -### Properties - -See the `properties` section in the schema file(s): - -- [URL Map](url_map.py.schema) - -### Usage - -1. Clone the [Deployment Manager Samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this - case, [examples/url\_map.yaml](examples/url_map.yaml): - -```shell - cp templates/url_map/examples/url_map.yaml \ - my_url_map.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for - properties, refer to the schema files listed above): - -```shell - vim my_url_map.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace \ with the relevant - deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_url_map.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [URL Map](examples/url_map.yaml) diff --git a/dm/templates/url_map/examples/url_map.yaml b/dm/templates/url_map/examples/url_map.yaml deleted file mode 100644 index 798a092ccff..00000000000 --- a/dm/templates/url_map/examples/url_map.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Example of the URL map template usage. -# -# Replace the following placeholders with valid values: -# : a URL of the backend service to handle requests -# when none of the host rules match -# : a host name for which the traffic must be handled by -# 'media-matcher' -# : a URL of the backend service to handle requests -# starting with the /media path -# : a URL of the backend service to handle requests -# of the matched hosts that do not start with the /media path - -imports: - - path: templates/url_map/url_map.py - name: url_map.py - -resources: - - name: test-url-map - type: url_map.py - properties: - defaultService: - hostRules: - - hosts: - - - pathMatcher: media-matcher - pathMatchers: - - name: media-matcher - defaultService: - pathRules: - - service: - paths: - - /media diff --git a/dm/templates/url_map/tests/integration/url_map.bats b/dm/templates/url_map/tests/integration/url_map.bats deleted file mode 100755 index 5e48ba91069..00000000000 --- a/dm/templates/url_map/tests/integration/url_map.bats +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. -# envsubst requires all variables used in the example/config to be exported. 
-if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores with dashes in the deployment name. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" - # Test specific variables. - export RES_NAME="url-map-${RAND}" - export NAME="url-map-name-${RAND}" - export DESCRIPTION="url-map-description" - export BACKEND_SERVICE_NAME="external-backend-service-${RAND}" - export IGM_NAME="zonal-igm-http-${RAND}" - export IT_NAME="instance-template-${RAND}" - export HC_NAME="test-healthcheck-http-test" - export PORT="80" - export HOST="example.com" - export PATH1="/audio" - export PATH2="/video" - export MATCHER_NAME="default-matcher" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < ${BATS_TEST_DIRNAME}/${TEST_NAME}.yaml > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - delete_config - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - run gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --config ${CONFIG} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - -@test "Verifying URL map properties" { - run gcloud compute url-maps describe "${NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "description: ${DESCRIPTION}" ]] -} - -@test "Verifying URL map default backend" { - run gcloud compute url-maps describe "${NAME}" \ - --format "yaml(defaultService)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "${BACKEND_SERVICE_NAME}" ]] -} - -@test "Verifying path matcher" { - run gcloud compute url-maps describe "${NAME}" \ - --format "yaml(pathMatchers)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "name: ${MATCHER_NAME}" ]] - [[ "$output" =~ "${BACKEND_SERVICE_NAME}" ]] -} - -@test "Verifying path matcher paths" { - run gcloud compute url-maps describe "${NAME}" \ - --format "yaml(pathMatchers)" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] - [[ "$output" =~ "- ${PATH1}" ]] - [[ "$output" =~ "- ${PATH2}" ]] - [[ "$output" =~ "${BACKEND_SERVICE_NAME}" ]] -} - -@test "Deleting deployment" { - run gcloud deployment-manager deployments delete "${DEPLOYMENT_NAME}" -q \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ "$status" -eq 0 ]] -} - diff --git a/dm/templates/url_map/tests/integration/url_map.yaml b/dm/templates/url_map/tests/integration/url_map.yaml deleted file mode 100644 index 8a23572a8c0..00000000000 --- a/dm/templates/url_map/tests/integration/url_map.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# Test of the URL map template. 
- -imports: - - path: templates/url_map/url_map.py - name: url_map.py - -resources: - - name: ${RES_NAME} - type: url_map.py - properties: - name: ${NAME} - description: ${DESCRIPTION} - defaultService: $(ref.${BACKEND_SERVICE_NAME}.selfLink) - hostRules: - - hosts: - - ${HOST} - pathMatcher: ${MATCHER_NAME} - pathMatchers: - - name: ${MATCHER_NAME} - defaultService: $(ref.${BACKEND_SERVICE_NAME}.selfLink) - pathRules: - - service: $(ref.${BACKEND_SERVICE_NAME}.selfLink) - paths: - - ${PATH1} - - ${PATH2} - -# Test prerequisites. - - - name: ${BACKEND_SERVICE_NAME} - type: compute.v1.backendService - properties: - loadBalancingScheme: EXTERNAL - protocol: HTTP - backends: - - group: $(ref.${IGM_NAME}.instanceGroup) - healthChecks: - - $(ref.${HC_NAME}.selfLink) - - - name: ${IGM_NAME} - type: compute.v1.instanceGroupManager - properties: - instanceTemplate: $(ref.${IT_NAME}.selfLink) - zone: us-east1-c - targetSize: 1 - namedPorts: - - name: http - port: ${PORT} - - - name: ${IT_NAME} - type: compute.v1.instanceTemplate - properties: - properties: - machineType: f1-micro - disks: - - autoDelete: true - boot: true - deviceName: boot - initializeParams: - sourceImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - type: PERSISTENT - networkInterfaces: - - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - network: global/networks/default - metadata: - items: - - key: startup-script - value: | - #! /bin/bash - sudo apt-get update - sudo apt-get install apache2 -y - sudo service apache2 restart - echo "http-`hostname`" | sudo tee /var/www/html/index.html - EOF" - - - name: ${HC_NAME} - type: compute.v1.httpHealthCheck diff --git a/dm/templates/url_map/url_map.py b/dm/templates/url_map/url_map.py deleted file mode 100644 index 378139d3a8c..00000000000 --- a/dm/templates/url_map/url_map.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" This template creates a URL map. """ - - -def set_optional_property(destination, source, prop_name): - """ Copies the property value if present. """ - - if prop_name in source: - destination[prop_name] = source[prop_name] - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - properties = context.properties - name = properties.get('name', context.env['name']) - project_id = properties.get('project', context.env['project']) - - resource = { - 'name': context.env['name'], - # https://cloud.google.com/compute/docs/reference/rest/v1/urlMaps - 'type': 'gcp-types/compute-v1:urlMaps', - 'properties': { - 'name': name, - 'project': project_id, - }, - } - - optional_properties = [ - 'defaultService', - 'defaultUrlRedirect', - 'description', - 'hostRules', - 'pathMatchers', - 'tests', - ] - - for prop in optional_properties: - set_optional_property(resource['properties'], properties, prop) - - return { - 'resources': [resource], - 'output': - [ - { - 'name': 'name', - 'value': name - }, - { - 'name': 'selfLink', - 'value': '$(ref.{}.selfLink)'.format(context.env['name']) - } - ] - } diff --git a/dm/templates/url_map/url_map.py.schema b/dm/templates/url_map/url_map.py.schema deleted file mode 100644 index 8e18da29fd0..00000000000 --- a/dm/templates/url_map/url_map.py.schema +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -info: - title: URL Map - author: Sourced Group Inc. - version: 1.0.0 - description: | - Supports creation of the URL Map resource. - - For more information on this resource: - https://cloud.google.com/load-balancing/docs/https/url-map-concepts - - APIs endpoints used by this template: - - gcp-types/compute-v1:urlMaps => - https://cloud.google.com/compute/docs/reference/rest/v1/urlMaps - -additionalProperties: false - -allOf: - - oneOf: - - required: - - defaultUrlRedirect - - required: - - defaultRouteAction - - required: - - defaultService - -properties: - name: - type: string - description: | - Must comply with RFC1035. Specifically, the name must be 1-63 characters long and match - the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, - and all following characters must be a dash, lowercase letter, or digit, except the last character, - which cannot be a dash. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing the Cloud Router instance. The - Google apps domain is prefixed if applicable. - description: - type: string - description: The resource description (optional). 
- defaultUrlRedirect: - type: object - additionalProperties: false - properties: - hostRedirect: - type: string - description: | - The host that will be used in the redirect response instead of the one that was supplied in the request. - pathRedirect: - type: string - description: | - The path that will be used in the redirect response instead of the one that was supplied in the request. - pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request will be used for the redirect. - prefixRedirect: - type: string - description: | - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request. - prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request will be used for the redirect. - redirectResponseCode: - type: string - description: The HTTP Status code to use for this RedirectAction. - enum: - - MOVED_PERMANENTLY_DEFAULT - - FOUND - - SEE_OTHER - - TEMPORARY_REDIRECT - - PERMANENT_REDIRECT - httpsRedirect: - type: boolean - description: | - If set to true, the URL scheme in the redirected request is set to https. If set to false, the URL scheme of the redirected request will remain the same as that of the request. - This must only be set for UrlMaps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. - stripQuery: - type: boolean - description: If set to true, any accompanying query portion of the original URL is removed prior to redirecting the request. If set to false, the query portion of the original URL is retained. - defaultService: - type: string - description: | - The full or partial URL of the defaultService resource to which traffic is directed if none of the - hostRules match. 
If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, - etc. take effect prior to sending the request to the backend. However, if defaultService is specified, - defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any - weightedBackendServices, service must not be specified. - - Only one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set. - - Authorization requires one or more of the following Google IAM permissions on the specified resource defaultService: - - compute.backendBuckets.use - - compute.backendServices.use - hostRules: - type: array - uniqItems: true - description: | - The list of HostRules to use against the URL. - items: - type: object - additionalProperties: false - properties: - description: - type: string - description: | - The resource description (optional). - hosts: - type: array - description: | - The list of host patterns to match. They must be valid hostnames, except * will match any string of - ([a-z0-9-.]*). In that case, * must be the first character and must be followed - in the pattern by either - or .. - items: - type: string - pathMatcher: - type: string - description: | - The name of the PathMatcher to use for matching the path portion of - the URL if the hostRule matches the URL's host portion. - pathMatchers: - type: array - uniqItems: true - description: | - The list of the named PathMatchers to use against the URL. - items: - type: object - additionalProperties: false - properties: - name: - type: string - description: | - The name to which the PathMatcher is referred by the HostRule. - description: - type: string - description: | - The resource description (optional). - defaultService: - type: string - description: | - The full or partial URL to the BackendService resource. This will be used if none of the pathRules or - routeRules defined by this PathMatcher are matched. 
For example, the following are - all valid URLs to a BackendService resource: - - https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService - - compute/v1/projects/project/global/backendServices/backendService - - global/backendServices/backendService - - If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take - effect prior to sending the request to the backend. However, if defaultService is specified, - defaultRouteAction cannot contain any weightedBackendServices. - Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. - Only one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set. - - Authorization requires one or more of the following Google IAM permissions on the specified resource defaultService: - - compute.backendBuckets.use - - compute.backendServices.use - - Authorization requires one or more of the following Google IAM permissions on the specified resource defaultService: - - compute.backendBuckets.use - - compute.backendServices.use - pathRules: - type: array - uniqItems: true - description: | - The list of path rules. - items: - type: object - additionalProperties: false - properties: - service: - type: string - description: | - The full or partial URL of the backend service resource to which traffic is directed if this - rule is matched. If routeAction is additionally specified, advanced routing actions like - URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service - is specified, routeAction cannot contain any weightedBackendService s. Conversely, if routeAction - specifies any weightedBackendServices, service must not be specified. - - Only one of urlRedirect, service or routeAction.weightedBackendService must be set. 
- - Authorization requires one or more of the following Google IAM permissions on the specified resource service: - - compute.backendBuckets.use - - compute.backendServices.use - paths: - type: array - uniqItems: true - description: | - The list of the path patterns to match. Each pattern must - start with /. Asterisks (*) are allowed only at the end, - following the /. The string fed to the path matcher does not - include any text after the first ? or #, and those characters - are not allowed here. - items: - type: string - tests: - type: array - uniqItems: true - description: | - The list of the expected URL mapping tests. Request to update this UrlMap - succeed only if all of the test cases pass. You can specify a maximum of - 100 tests per UrlMap. - items: - type: object - additionalProperties: false - properties: - description: - type: string - description: | - The test case description. - host: - type: string - description: | - The host portion of the URL. - path: - type: string - description: | - The path portion of the URL. - service: - type: string - description: | - The BackendService resource the given URL is expected to be mapped - to. - -outputs: - name: - type: string - description: The resource name. - selfLink: - type: string - description: The URI (SelfLink) of the URL map rule resource. - -documentation: - - templates/url_map/README.md - -examples: - - templates/url_map/examples/url_map.yaml diff --git a/dm/templates/vpn/README.md b/dm/templates/vpn/README.md deleted file mode 100644 index 332157cb683..00000000000 --- a/dm/templates/vpn/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# VPN - -This template creates a VPN. 
- -## Prerequisites - -- Install [gcloud](https://cloud.google.com/sdk) -- Create a [GCP project, set up billing, enable requisite APIs](../project/README.md) -- Create a [network](../network/README.md) -- Grant the [compute.networkAdmin or compute.admin](https://cloud.google.com/compute/docs/access/iam) IAM role to the project service account - -## Deployment - -### Resources - -- [compute.v1.targetVpnGateway](https://cloud.google.com/compute/docs/reference/latest/targetVpnGateways) -- [compute.v1.address](https://cloud.google.com/compute/docs/reference/rest/v1/addresses) -- [compute.v1.forwardingRule](https://cloud.google.com/compute/docs/reference/latest/forwardingRules) -- [compute.v1.vpnTunnel](https://cloud.google.com/compute/docs/reference/latest/vpnTunnels) -- [gcp-types/compute-v1:compute.routers.patch](https://www.googleapis.com/discovery/v1/apis/compute/v1/rest) - -### Properties - -See `properties` section in the schema file(s): - -- [VPN](../vpn/vpn.py.schema) - -### Usage - -1. Clone the [Deployment Manager samples repository](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit): - -```shell - git clone https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit -``` - -2. Go to the [dm](../../) directory: - -```shell - cd dm -``` - -3. Copy the example DM config to be used as a model for the deployment; in this case, [examples/vpn.yaml](examples/vpn.yaml): - -```shell - cp templates/vpn/examples/vpn.yaml my_vpn.yaml -``` - -4. Change the values in the config file to match your specific GCP setup (for properties, refer to the schema files listed above): - -```shell - vim my_vpn.yaml # <== change values to match your GCP setup -``` - -5. Create your deployment (replace with the relevant deployment name): - -```shell - gcloud deployment-manager deployments create \ - --config my_vpn.yaml -``` - -6. 
In case you need to delete your deployment: - -```shell - gcloud deployment-manager deployments delete -``` - -## Examples - -- [VPN](examples/vpn.yaml) diff --git a/dm/templates/vpn/examples/vpn.yaml b/dm/templates/vpn/examples/vpn.yaml deleted file mode 100644 index c090e2aaf1d..00000000000 --- a/dm/templates/vpn/examples/vpn.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Example of the VPN template usage. -# -# In this example, a VPN is created. -# -# Upon successful deployment of the VPN resource, the following resources -# are created: -# - a target VPN gateway (compute.v1.targetVpnGateway) -# - a static address (compute.v1.address) -# - an ESP forwarding rule (compute.v1.forwardingRule) -# - a UDP 4500 forwarding rule (compute.v1.forwardingRule) -# - a UDP 500 forwarding rule (compute.v1.forwardingRule) -# - a VPN tunnel (compute.v1.vpnTunnel) -# -# Replace FIXME:network-name with the name of a valid VPC within the region -# Replace FIXME:cloud-router-name with the name a valid Cloud Router - -imports: - - path: templates/vpn/vpn.py - name: vpn.py - -resources: - - name: test-vpn - type: vpn.py - properties: - region: us-east1 - network: - peerAddress: 1.2.3.4 - asn: 65001 - sharedSecret: superSecretPassw0rd - router: diff --git a/dm/templates/vpn/tests/integration/vpn.bats b/dm/templates/vpn/tests/integration/vpn.bats deleted file mode 100644 index db68770fc8f..00000000000 --- a/dm/templates/vpn/tests/integration/vpn.bats +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env bats - -source tests/helpers.bash - -TEST_NAME=$(basename "${BATS_TEST_FILENAME}" | cut -d '.' -f 1) - -# Create a random 10-char string and save it in a file. -RANDOM_FILE="/tmp/${CLOUD_FOUNDATION_ORGANIZATION_ID}-${TEST_NAME}.txt" -if [[ ! -e "${RANDOM_FILE}" ]]; then - RAND=$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 10) - echo ${RAND} > "${RANDOM_FILE}" -fi - -# Set variables based on the random string saved in the file. 
-# envsubst requires all variables used in the example/config to be exported. -if [[ -e "${RANDOM_FILE}" ]]; then - export RAND=$(cat "${RANDOM_FILE}") - DEPLOYMENT_NAME="${CLOUD_FOUNDATION_PROJECT_ID}-${TEST_NAME}-${RAND}" - # Replace underscores in the deployment name with dashes. - DEPLOYMENT_NAME=${DEPLOYMENT_NAME//_/-} - CONFIG=".${DEPLOYMENT_NAME}.yaml" -fi - -########## HELPER FUNCTIONS ########## - -function create_config() { - echo "Creating ${CONFIG}" - envsubst < "templates/vpn/tests/integration/${TEST_NAME}.yaml" > "${CONFIG}" -} - -function delete_config() { - echo "Deleting ${CONFIG}" - rm -f "${CONFIG}" -} - -function setup() { - # Global setup; this is executed once per test file. - if [ ${BATS_TEST_NUMBER} -eq 1 ]; then - gcloud compute networks create "network-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --description "integration test ${RAND}" \ - --subnet-mode custom - gcloud compute networks subnets create "subnet-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network "network-${RAND}" \ - --range 10.118.8.0/22 \ - --region us-east1 - gcloud compute routers create "router-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --network "network-${RAND}" \ - --asn 65001 \ - --region us-east1 - create_config - fi - - # Per-test setup steps. -} - -function teardown() { - # Global teardown; this is executed once per test file. - if [[ "$BATS_TEST_NUMBER" -eq "${#BATS_TEST_NAMES[@]}" ]]; then - gcloud compute routers delete "router-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute networks subnets delete "subnet-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" \ - --region us-east1 -q - gcloud compute networks delete "network-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - delete_config - rm -f "${RANDOM_FILE}" - fi - - # Per-test teardown steps. 
-} - - -@test "Creating deployment ${DEPLOYMENT_NAME} from ${CONFIG}" { - gcloud deployment-manager deployments create "${DEPLOYMENT_NAME}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" --config "${CONFIG}" -} - -@test "Verifying that resources were created in deployment ${DEPLOYMENT_NAME}" { - run gcloud compute networks list --filter="name:network-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [ "$status" -eq 0 ] - [[ "$output" =~ "network-${RAND}" ]] - - run gcloud compute routers list --filter="name:router-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [ "$status" -eq 0 ] - [[ "$output" =~ "router-${RAND} us-east1 network-${RAND}" ]] -} - -@test "Verifying the the static address was created in deployment ${DEPLOYMENT_NAME}" { - - run gcloud compute addresses list --filter="name:test-vpn-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [ "$status" -eq 0 ] - [[ "$output" =~ "test-vpn-${RAND}" ]] - [[ "$output" =~ "us-east1" ]] -} - -@test "Verifying that the target VPN gateway was created in deployment ${DEPLOYMENT_NAME}" { - - run gcloud compute target-vpn-gateways list \ - --filter="name:test-vpn-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [ "$status" -eq 0 ] - [[ "$output" =~ "test-vpn-${RAND} network-${RAND} us-east1" ]] -} - -@test "Verifying that the VPN tunnel was created in deployment ${DEPLOYMENT_NAME}" { - - run gcloud compute vpn-tunnels list --filter="name:test-vpn-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [ "$status" -eq 0 ] - [[ "$output" =~ "test-vpn-${RAND} us-east1 test-vpn-${RAND} 1.2.3.4" ]] -} - -@test "Verifying that the forwarding rules were created in deployment ${DEPLOYMENT_NAME}" { - - run gcloud compute forwarding-rules list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [ "$status" -eq 0 ] - [[ "$output" =~ "test-vpn-${RAND}-esp us-east1" ]] - [[ "$output" =~ "test-vpn-${RAND}-udp-4500 us-east1" ]] - [[ "$output" =~ "test-vpn-${RAND}-udp-500 us-east1" ]] -} - -@test "Deleting 
deployment" { - gcloud deployment-manager deployments delete ${DEPLOYMENT_NAME} \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" -q - - run gcloud compute addresses list --filter="name:test-vpn-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-vpn-${RAND}" ]] - - run gcloud compute target-vpn-gateways list \ - --filter="name:test-vpn-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-vpn-${RAND}" ]] - - run gcloud compute vpn-tunnels list --filter="name:test-vpn-${RAND}" \ - --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-vpn-${RAND}" ]] - - run gcloud compute forwarding-rules list --project "${CLOUD_FOUNDATION_PROJECT_ID}" - [[ ! "$output" =~ "test-vpn-${RAND}-esp" ]] - [[ ! "$output" =~ "test-vpn-${RAND}-udp-4500" ]] - [[ ! "$output" =~ "test-vpn-${RAND}-udp-500" ]] -} diff --git a/dm/templates/vpn/tests/integration/vpn.yaml b/dm/templates/vpn/tests/integration/vpn.yaml deleted file mode 100644 index 39f41d221c2..00000000000 --- a/dm/templates/vpn/tests/integration/vpn.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Test of the VPN template. -# -# Variables: -# RAND: A random string used by the testing suite. -# - -imports: - - path: templates/vpn/vpn.py - name: vpn.py - -resources: - - name: vpn-${RAND} - type: vpn.py - properties: - name: test-vpn-${RAND} - region: us-east1 - network: network-${RAND} - peerAddress: 1.2.3.4 - asn: 65001 - sharedSecret: superSecretPassw0rd - router: router-${RAND} diff --git a/dm/templates/vpn/vpn.py b/dm/templates/vpn/vpn.py deleted file mode 100644 index 968f4160cea..00000000000 --- a/dm/templates/vpn/vpn.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""This template creates a VPN tunnel, gateway, and forwarding rules.""" - - -def generate_config(context): - """ Entry point for the deployment resources. """ - - - properties = context.properties - project_id = properties.get('project', context.env['project']) - - network = context.properties.get('networkURL', generate_network_uri( - project_id, - context.properties.get('network','') - )) - target_vpn_gateway = context.env['name'] + '-tvpng' - esp_rule = context.env['name'] + '-esp-rule' - udp_500_rule = context.env['name'] + '-udp-500-rule' - udp_4500_rule = context.env['name'] + '-udp-4500-rule' - vpn_tunnel = context.env['name'] + '-vpn' - router_vpn_binding = context.env['name'] + '-router-vpn-binding' - resources = [] - if 'ipAddress' in context.properties: - ip_address = context.properties['ipAddress'] - static_ip = '' - else: - static_ip = context.env['name'] + '-ip' - resources.append({ - # The reserved address resource. - 'name': static_ip, - # https://cloud.google.com/compute/docs/reference/rest/v1/addresses - 'type': 'gcp-types/compute-v1:addresses', - 'properties': { - 'name': properties.get('name', static_ip), - 'project': project_id, - 'region': context.properties['region'] - } - }) - ip_address = '$(ref.' + static_ip + '.address)' - - resources.extend([ - { - # The target VPN gateway resource. 
- 'name': target_vpn_gateway, - # https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways - 'type': 'gcp-types/compute-v1:targetVpnGateways', - 'properties': - { - 'name': properties.get('name', target_vpn_gateway), - 'project': project_id, - 'network': network, - 'region': context.properties['region'], - } - }, - { - # The forwarding rule resource for the ESP traffic. - 'name': esp_rule, - # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules - 'type': 'gcp-types/compute-v1:forwardingRules', - 'properties': - { - 'name': '{}-esp'.format(properties.get('name')) if 'name' in properties else esp_rule, - 'project': project_id, - 'IPAddress': ip_address, - 'IPProtocol': 'ESP', - 'region': context.properties['region'], - 'target': '$(ref.' + target_vpn_gateway + '.selfLink)', - } - }, - { - # The forwarding rule resource for the UDP traffic on port 4500. - 'name': udp_4500_rule, - # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules - 'type': 'gcp-types/compute-v1:forwardingRules', - 'properties': - { - 'name': '{}-udp-4500'.format(properties.get('name')) if 'name' in properties else udp_4500_rule, - 'project': project_id, - 'IPAddress': ip_address, - 'IPProtocol': 'UDP', - 'portRange': 4500, - 'region': context.properties['region'], - 'target': '$(ref.' + target_vpn_gateway + '.selfLink)', - } - }, - { - # The forwarding rule resource for the UDP traffic on port 500 - 'name': udp_500_rule, - # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules - 'type': 'gcp-types/compute-v1:forwardingRules', - 'properties': - { - 'name': '{}-udp-500'.format(properties.get('name')) if 'name' in properties else udp_500_rule, - 'project': project_id, - 'IPAddress': ip_address, - 'IPProtocol': 'UDP', - 'portRange': 500, - 'region': context.properties['region'], - 'target': '$(ref.' 
+ target_vpn_gateway + '.selfLink)', - } - }, - - ]) - router_url_tag = 'routerURL' - router_name_tag = 'router' - - if router_name_tag in context.properties: - router_url = context.properties.get(router_url_tag, generate_router_uri( - context.env['project'], - context.properties['region'], - context.properties[router_name_tag])) - # Create dynamic routing VPN - resources.extend([ - { - # The VPN tunnel resource. - 'name': vpn_tunnel, - # https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels - 'type': 'gcp-types/compute-v1:vpnTunnels', - 'properties': - { - 'name': properties.get('name', vpn_tunnel), - 'project': project_id, - 'description': - 'A vpn tunnel', - 'ikeVersion': - 2, - 'peerIp': - context.properties['peerAddress'], - 'region': - context.properties['region'], - 'router': router_url, - 'sharedSecret': - context.properties['sharedSecret'], - 'targetVpnGateway': - '$(ref.' + target_vpn_gateway + '.selfLink)' - }, - 'metadata': { - 'dependsOn': [esp_rule, - udp_500_rule, - udp_4500_rule] - } - }]) - else: - # Create static routing VPN - resources.append( - { - # The VPN tunnel resource. - 'name': vpn_tunnel, - 'type': 'gcp-types/compute-v1:vpnTunnels', - 'properties': { - 'name': vpn_tunnel, - 'description': - 'A vpn tunnel', - 'ikeVersion': - 2, - 'peerIp': - context.properties['peerAddress'], - 'region': - context.properties['region'], - 'sharedSecret': - context.properties['sharedSecret'], - 'targetVpnGateway': - '$(ref.' 
+ target_vpn_gateway + '.selfLink)', - 'localTrafficSelector': - context.properties['localTrafficSelector'], - 'remoteTrafficSelector': - context.properties['remoteTrafficSelector'], - - }, - 'metadata': { - 'dependsOn': [esp_rule, udp_500_rule, udp_4500_rule] - } - }, - ) - - return { - 'resources': - resources, - 'outputs': - [ - { - 'name': 'targetVpnGateway', - 'value': target_vpn_gateway - }, - { - 'name': 'staticIp', - 'value': static_ip - }, - { - 'name': 'espRule', - 'value': esp_rule - }, - { - 'name': 'udp500Rule', - 'value': udp_500_rule - }, - { - 'name': 'udp4500Rule', - 'value': udp_4500_rule - }, - { - 'name': 'vpnTunnel', - 'value': vpn_tunnel - }, - { - 'name': 'vpnTunnelUri', - 'value': '$(ref.'+vpn_tunnel+'.selfLink)' - } - ] - } - -def generate_network_uri(project_id, network): - """Format the resource name as a resource URI.""" - return 'projects/{}/global/networks/{}'.format(project_id, network) - -def generate_router_uri(project_id, region, router_name): - """Format the router name as a router URI.""" - return 'projects/{}/regions/{}/routers/{}'.format( - project_id, - region, - router_name - ) diff --git a/dm/templates/vpn/vpn.py.schema b/dm/templates/vpn/vpn.py.schema deleted file mode 100644 index 32b5c6f3a21..00000000000 --- a/dm/templates/vpn/vpn.py.schema +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2018 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -info: - title: VPN - author: Sourced Group - version: 1.1.0 - description: | - Creates a VPN tunnel, gateway, and fowarding rules. - - For more information on this resource: - https://cloud.google.com/vpn/docs/concepts/overview - - APIs endpoints used by this template: - - gcp-types/compute-v1:instances => - https://cloud.google.com/compute/docs/reference/rest/v1/instances - - gcp-types/compute-v1:forwardingRules => - https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules - - gcp-types/compute-v1:addresses => - https://cloud.google.com/compute/docs/reference/rest/v1/addresses - - gcp-types/compute-v1:targetVpnGateways => - https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways - - gcp-types/compute-v1:vpnTunnels => - https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels - - gcp-types/compute-v1:compute.routers.patch => - https://cloud.google.com/compute/docs/reference/rest/v1/routers/patch - -additionalProperties: false - -allOf: - - required: - - region - - peerAddress - - sharedSecret - - oneOf: - - required: - - networkURL - - required: - - network - - oneOf: - # Use dynamic routing. - - required: - - asn - - router - # Use static routing. - - allOf: - - not: - required: - - router - - required: - - localTrafficSelector - - remoteTrafficSelector - -properties: - name: - type: string - description: | - Common name for all provisioned resources. - Resource name would be used if omitted. - project: - type: string - description: | - The project ID of the project containing resources. The - Google apps domain is prefixed if applicable. - routerURL: - type: string - description: URL (or URI) of the Router resource. Used by vpnTunnels. - router: - type: string - description: | - Name of the Router resource. - networkURL: - type: string - description: | - The URL (or URI) of the network to which the VPN belongs. - network: - type: string - description: | - The name of the network to which the VPN belongs. 
Only use "networkName" - when it is impossible to get "networkURL". - region: - type: string - description: The URI of the region where the VPN resides. - peerAddress: - type: string - description: The IP address of the peer VPN gateway. Only IPv4 is supported. - pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}$ - asn: - type: integer - description: | - The local BGP Autonomous System Number (ASN). Must be an RFC6996 private - ASN, either 16-bit or 32-bit. The value will be fixed for the VPN resource. - All VPN tunnels that link to the router will have the same local ASN. - sharedSecret: - type: string - description: | - The value is used to set the secure session between the Cloud VPN - gateway and the peer VPN gateway. - localTrafficSelector: - type: array - description: | - Used when establishing the VPN tunnel with the peer VPN gateway. - default: ["0.0.0.0/0"] - uniqItems: true - items: - type: string - description: "CIDR formatted string, for example: 192.168.0.0/16." - remoteTrafficSelector: - type: array - description: | - Used when establishing the VPN tunnel with the peer VPN gateway. - default: ["0.0.0.0/0"] - uniqItems: true - items: - type: string - description: "CIDR formatted string, for example: 192.168.0.0/16." - ipAddress: - type: string - description: | - Static IP address used by forwarding rules. When not specified, a new static - IP address will be created. - -outputs: - targetVpnGateway: - type: string - description: The name of the target VPN gateway resource. - staticIp: - type: string - description: The name of the reserved address resource. - espRule: - type: string - description: The name of the ForwardingRule resource for the ESP traffic. - udp4500Rule: - type: string - description: The name of the ForwardingRule resource for the UDP 4500 traffic. - udp500Rule: - type: string - description: The name of the ForwardingRule resource for the UDP 500 traffic. - vpnTunnel: - type: string - description: The name of the VPN tunnel resource. 
- vpnTunnelUri: - type: string - description: The URI of the VPN tunnel resource. - -documentation: - - templates/vpn/README.md - -examples: - - templates/vpn/examples/vpn.yaml diff --git a/dm/tests/cloud-foundation-tests.conf.example b/dm/tests/cloud-foundation-tests.conf.example deleted file mode 100644 index 53fede64694..00000000000 --- a/dm/tests/cloud-foundation-tests.conf.example +++ /dev/null @@ -1,5 +0,0 @@ -# Please change the values for these variables to match your organization's GCP setup -export CLOUD_FOUNDATION_ORGANIZATION_ID="111111111111" -export CLOUD_FOUNDATION_PROJECT_ID="my-project" -export CLOUD_FOUNDATION_BILLING_ACCOUNT_ID="AAAAAA-111111-222222" -export CLOUD_FOUNDATION_USER_ACCOUNT="my-name@example.com" diff --git a/dm/tests/fixtures/configs/my-firewalls.yaml b/dm/tests/fixtures/configs/my-firewalls.yaml deleted file mode 100644 index 766c72db350..00000000000 --- a/dm/tests/fixtures/configs/my-firewalls.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: my-firewalls -description: My firewalls deployment - -imports: - - path: templates/firewall/firewall.py -resources: - - type: templates/firewall/firewall.py - name: my-firewall-prod - properties: - network: $(out.my-networks.my-network-prod.name) - rules: - - name: allow-proxy-from-inside-prod - allowed: - - IPProtocol: tcp - ports: - - "80" - - "444" - description: This rule allows connectivity to the HTTP proxies - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 - - name: allow-dns-from-inside-prod - allowed: - - IPProtocol: udp - ports: - - "53" - - IPProtocol: tcp - ports: - - "53" - description: this rule allows DNS queries to google's 8.8.8.8 - direction: EGRESS - destinationRanges: - - 8.8.8.8/32 - - type: templates/firewall/firewall.py - name: my-firewall-dev - properties: - network: $(out.my-networks.my-network-prod.name) - rules: - - name: allow-proxy-from-inside-dev - allowed: - - IPProtocol: tcp - ports: - - "80" - - "444" - description: This rule allows connectivity to the HTTP 
proxies - direction: INGRESS - sourceRanges: - - 10.0.0.0/8 diff --git a/dm/tests/fixtures/configs/my-instance-1.yaml b/dm/tests/fixtures/configs/my-instance-1.yaml deleted file mode 100644 index 2d0359a6ace..00000000000 --- a/dm/tests/fixtures/configs/my-instance-1.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Example on how to use the instance template -# -# In this example, a Ubuntu 18.04 instance with Nginx is created. -description: My instance deployment for prod environment - -imports: - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: my-instance-prod-1 - type: instance.py - properties: - zone: us-central1-a - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - network: $(out.my-networks.my-network-prod.name) - metadata: - items: - - key: startup-script - value: sudo apt-get update && sudo apt-get install -y nginx diff --git a/dm/tests/fixtures/configs/my-instance-2.yaml b/dm/tests/fixtures/configs/my-instance-2.yaml deleted file mode 100644 index cfcf6703102..00000000000 --- a/dm/tests/fixtures/configs/my-instance-2.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Example on how to use the instance template -# -# In this example, a Ubuntu 18.04 instance with Nginx is created. 
-description: | - This instance is dependend on 'my-networks' and 'my-instance-1' configs - -imports: - - path: templates/instance/instance.py - name: instance.py - -resources: - - name: my-instance-prod-2 - type: instance.py - properties: - zone: us-central1-a - diskImage: projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts - diskSizeGb: 100 - machineType: f1-micro - diskType: pd-ssd - network: $(out.my-networks.my-network-prod.name) - metadata: - items: - - key: startup-script - value: | - #!/bin/bash - apt-get update - apt-get install -y nginx - cat << EOF > /tmp/instance-1-ip.txt - $(out.my-instance-1.my-instance-prod-1.internalIp) - EOF diff --git a/dm/tests/fixtures/configs/my-networks.yaml b/dm/tests/fixtures/configs/my-networks.yaml deleted file mode 100644 index d3837a6c35d..00000000000 --- a/dm/tests/fixtures/configs/my-networks.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Example of a config that has no dependencies. -# -# Notice the 'name' and 'project' yaml directives are not specified in this -# config, meaning that the deployment name will be obtained from the filename -# (without the extension), and the project needs to be specified via one of -# these methods, in this order of precedence: -# 1- The '--project' command line option (highest precedence) -# 2- The 'project' directive in this config file -# 3- The CLOUD_FOUNDATION_TOOLKIT_PROJECT_ID environment variable -# 4- The default projected configured with the CGP SDK - - -description: my networks deployment - -imports: - - path: templates/network/network.py - -resources: - - type: templates/network/network.py - name: my-network-prod - properties: - autoCreateSubnetworks: true - - - type: templates/network/network.py - name: my-network-dev - properties: - autoCreateSubnetworks: false diff --git a/dm/tests/helpers.bash b/dm/tests/helpers.bash deleted file mode 100644 index 96c24f33a56..00000000000 --- a/dm/tests/helpers.bash +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# This file is meant 
to hold common variables and functions to be used by the -# testing suite (bats). -# -# Tests need to run against the user's own organization/projects/etc, so the -# most basic configs are read and exported from the user's own -# `~/.cloud-foundation-test.conf`. -# -# An example for this config is placed under `tests/cloud-foundation-tests.conf`. Users should -# move this file to `~/.cloud-foundation-test.conf` and tweak according to their own GCP -# organizational structure - -CLOUD_FOUNDATION_CONF=${CLOUD_FOUNDATION_CONF-~/.cloud-foundation-tests.conf} - -if [[ -z "${CLOUD_FOUNDATION_ORGANIZATION_ID}" || -z "${CLOUD_FOUNDATION_BILLING_ACCOUNT_ID}" || -z "${CLOUD_FOUNDATION_PROJECT_ID}" ]]; then - if [[ ! -e ${CLOUD_FOUNDATION_CONF} ]]; then - echo "Please setup your environment variables or Cloud Foundation config file" - echo "Default location for config: ~/.cloud-foundation-tests.conf. Example:" - echo "=====================" - cat tests/cloud-foundation-tests.conf.example - echo "=====================" - exit 1 - fi - source ${CLOUD_FOUNDATION_CONF} -fi diff --git a/dm/tests/run-tests b/dm/tests/run-tests deleted file mode 100644 index b51dc2762ee..00000000000 --- a/dm/tests/run-tests +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -# Copyright 2018 Google Inc. All Rights Reserved. 
-# - -# Finds the directory name for this script, and source the env -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" -source "$DIR/../src/cftenv" - -# Executes tests -exec python -m pytest ${@} diff --git a/dm/tests/templates/conftest.py b/dm/tests/templates/conftest.py deleted file mode 100644 index c93cf88fb00..00000000000 --- a/dm/tests/templates/conftest.py +++ /dev/null @@ -1,39 +0,0 @@ -from os.path import join, dirname, isdir -from os import walk -import yaml - -def read_file(path): - with open(path) as f: - contents = f.read() - f.close() - return yaml.safe_load(contents) - -def look_for_schemas_tests(root): - ret = [] - - for (dirpath, dirnames, filenames) in walk(root): - for file in filenames: - if ( - file.endswith('.yaml') and - (file.startswith('invalid_') or file.startswith('valid_')) - ): - filename = join(dirpath, file) - ret.append( - (file.startswith('valid_'), filename, read_file(filename)) - ) - - return ret - -def look_for_schemas_dirs_tests(): - template_root = join(dirname(__file__), '..', '..', 'templates') - ret = [] - - for (dirpath, dirnames, filenames) in walk(template_root): - for dir in dirnames: - dir_unit = join(dirpath, dir, 'tests', 'schemas') - if isdir(dir_unit): - schema = read_file(join(dirpath, dir, dir + '.py.schema')) - ret.append((dir, schema, look_for_schemas_tests(dir_unit))) - - return ret - diff --git a/dm/tests/templates/test_schemas.py b/dm/tests/templates/test_schemas.py deleted file mode 100644 index cfb4f274b64..00000000000 --- a/dm/tests/templates/test_schemas.py +++ /dev/null @@ -1,15 +0,0 @@ -from .conftest import look_for_schemas_dirs_tests -from jsonschema.exceptions import ValidationError -from jsonschema import validate - -def test_schemas(): - modules = look_for_schemas_dirs_tests() - for (module, schema, files) in modules: - for (isValid, path, data) in files: - try: - validate(data, schema) - if not isValid: - raise Exception("Validation for {} should have failed".format(path)) - 
except ValidationError: - if isValid: - raise diff --git a/dm/tests/unit/conftest.py b/dm/tests/unit/conftest.py deleted file mode 100644 index 8b058460470..00000000000 --- a/dm/tests/unit/conftest.py +++ /dev/null @@ -1,84 +0,0 @@ -from collections import namedtuple -import io -import jinja2 -import os -import os.path -import pytest - -FIXTURES_DIR = '../fixtures' - -ConfigType = namedtuple('ConfigType', ['path', 'content', 'jinja']) - -def get_fixtures_fullpath(): - """ Returns the full path for the fixture directory - - Args: - - Returns: The full path to the fixture diretory, Eg: - /home/vagrant/git/cloud-foundation-toolkit/dm/tests/unit/../fixtures/configs/ - - """ - return '{}/{}'.format( - os.path.dirname(os.path.realpath(__file__)), - FIXTURES_DIR, - ) - -def get_configsdir_fullpath(): - """ Returns the full path for a config file fixture - - Args: - - Returns: The full path to the config directory, Eg: - /home/vagrant/git/cloud-foundation-toolkit/dm/tests/unit/../fixtures/configs/config-test-1.yaml - - """ - return '{}/{}'.format( - get_fixtures_fullpath(), - 'configs' - ) - -def get_config_fullpath(config): - """ Returns the full path for a config file fixture - - Args: - config (string): The config file name. 
Eg, config-test-1.yaml - - Returns: The full path to the config file, Eg: - /home/vagrant/git/cloud-foundation-toolkit/dm/tests/unit/../fixtures/configs/config-test-1.yaml - - """ - return '{}/{}'.format( - get_configsdir_fullpath(), - config - ) - - -class Configs(): - directory = get_configsdir_fullpath() - - @property - def files(self): - if not hasattr(self, '_files'): - self._files = {} - files = [f for f in os.listdir(self.directory) if '.yaml' == f[-5:]] - for f in files: - fullpath = get_config_fullpath(f) - content = io.open(fullpath).read() - self._files[f] = ConfigType( - path=fullpath, - content=content, - jinja=jinja2.Template(content).render() - ) - return self._files - - -@pytest.fixture -def configs(): - return Configs() - - -if __name__ == '__main__': - c = Configs() - print(c.directory) - for f in c.files: - print(f.path, f.content) diff --git a/dm/tests/unit/test_actions.py b/dm/tests/unit/test_actions.py deleted file mode 100644 index 490ca8eb30c..00000000000 --- a/dm/tests/unit/test_actions.py +++ /dev/null @@ -1,137 +0,0 @@ -from six import PY2 - -if PY2: - import mock -else: - import unittest.mock as mock - - -from apitools.base.py.exceptions import HttpNotFoundError -import pytest - -from cloud_foundation_toolkit import actions -from cloud_foundation_toolkit.deployment import Config, ConfigGraph - - -ACTIONS = ['apply', 'create', 'delete', 'update'] - - -class Args(object): - - def __init__(self, **kwargs): - self.preview = False - self.project = False - self.show_stages = False - self.format = 'human' - [setattr(self, k, v) for k, v in kwargs.items()] - - -def get_number_of_elements(items): - if isinstance(item, list): - return sum(get_number_of_elements(subitem) for subitem in item) - else: - return 1 - - -def test_execute(configs): - args = Args(action='apply', config=[configs.directory]) - with mock.patch('cloud_foundation_toolkit.actions.Deployment') as m1: - graph = ConfigGraph([v.path for k, v in configs.files.items()]) - n_configs = 
len(configs.files) - - r = actions.execute(args) - assert r == None - assert m1.call_count == n_configs - - args.show_stages = True - r = actions.execute(args) - assert r == None - assert m1.call_count == n_configs - - with mock.patch('cloud_foundation_toolkit.actions.json.dumps') as m2: - args.format = 'json' - r = actions.execute(args) - assert m1.call_count == n_configs - assert m2.call_count == 1 - - with mock.patch('cloud_foundation_toolkit.actions.YAML.dump') as m2: - args.format = 'yaml' - r = actions.execute(args) - assert m1.call_count == n_configs - assert m2.call_count == 1 - - - - - -def test_valid_actions(): - ACTUAL_ACTIONS = actions.ACTION_MAP.keys() - ACTUAL_ACTIONS.sort() - assert ACTUAL_ACTIONS == ACTIONS - - -def test_action(configs): - args = Args(config=[configs.directory]) - for action in ACTIONS: - args.action = action - args.show_stages = False - n_configs = len(configs.files) - with mock.patch('cloud_foundation_toolkit.actions.Deployment') as m1: - # Test the normal/expected flow of the function - r = actions.execute(args) - method = getattr(mock.call(), action) - assert m1.call_count == n_configs - if action == 'delete': - assert m1.mock_calls.count(method()) == n_configs - else: - assert m1.mock_calls.count(method(preview=args.preview)) == n_configs - - # Test exception handling in the function - m1.reset_mock() - getattr(m1.return_value, action).side_effect = HttpNotFoundError('a', 'b', 'c') - if action == 'delete': - # if delete is called, execute() should catch the exception - # and keep going as if nothing happens - r = actions.execute(args) - assert m1.mock_calls.count(method()) == n_configs - else: - # If exception is raised in any method other than delete, - # something is really wrong, so exception in re-raised - # by `execute()`, making the script exit - # called onde - with pytest.raises(HttpNotFoundError): - r = actions.execute(args) - assert m1.mock_calls.count(method(preview=args.preview)) == 1 - - # Test dry-run - 
m1.reset_mock() - args.show_stages = True - r = actions.execute(args) - method = getattr(mock.call(), action) - m1.assert_not_called() - - -def test_get_config_files(configs): - # Test only single directory - r = actions.get_config_files([configs.directory]) - files = [v.path for k, v in configs.files.items()] - files.sort() - r.sort() - assert files == r - - # Test only files - files = [v.path for k, v in configs.files.items()] - r = actions.get_config_files(files) - files.sort() - r.sort() - assert files == r - - # Test files and directories - confs = [configs.directory] + ['some_file.yaml'] - r = actions.get_config_files(confs) - files = [v.path for k, v in configs.files.items()] + ['some_file.yaml'] - files.sort() - r.sort() - assert files == r - - diff --git a/dm/tests/unit/test_deployment.py b/dm/tests/unit/test_deployment.py deleted file mode 100644 index b64978eaea3..00000000000 --- a/dm/tests/unit/test_deployment.py +++ /dev/null @@ -1,90 +0,0 @@ -from six import PY2 - -from apitools.base.py.exceptions import HttpNotFoundError -import jinja2 -import pytest -from ruamel.yaml import YAML - -from cloud_foundation_toolkit.deployment import Config -from cloud_foundation_toolkit.deployment import ConfigGraph -from cloud_foundation_toolkit.deployment import Deployment - -if PY2: - import mock -else: - import unittest.mock as mock - -class Message(): - def __init__(self, **kwargs): - [setattr(self, k, v) for k, v in kwargs.items()] - - -@pytest.fixture -def args(): - return Args() - -def test_config(configs): - c = Config(configs.files['my-networks.yaml'].path) - assert c.as_string == configs.files['my-networks.yaml'].jinja - - -def test_config_list(configs): - config_paths = [v.path for k, v in configs.files.items()] - config_list = ConfigGraph(config_paths) - for level in config_list: - assert isinstance(level, list) - for c in level: - assert isinstance(c, Config) - - -def test_deployment_object(configs): - config = 
Config(configs.files['my-networks.yaml'].path) - deployment = Deployment(config) - assert deployment.config['name'] == 'my-networks' - - -def test_deployment_get(configs): - config = Config(configs.files['my-networks.yaml'].path) - deployment = Deployment(config) - with mock.patch.object(deployment.client.deployments, 'Get') as m: - m.return_value = Message( - name='my-networks', - fingerprint='abcdefgh' - ) - d = deployment.get() - assert d is not None - assert deployment.current == d - - -def test_deployment_get_doesnt_exist(configs): - config = Config(configs.files['my-networks.yaml'].path) - deployment = Deployment(config) - with mock.patch('cloud_foundation_toolkit.deployment.get_deployment') as m: - m.return_value = None - d = deployment.get() - assert d is None - assert deployment.current == d - - -def test_deployment_create(configs): - config = Config(configs.files['my-networks.yaml'].path) - patches = { - 'client': mock.DEFAULT, - 'wait': mock.DEFAULT, - 'get': mock.DEFAULT, - 'print_resources_and_outputs': mock.DEFAULT - } - - with mock.patch.multiple(Deployment, **patches) as mocks: - deployment = Deployment(config) - mocks['client'].deployments.Insert.return_value = Message( - name='my-network-prod', - fingerprint='abcdefgh' - ) - mocks['client'].deployments.Get.return_value = Message( - name='my-network-prod', - fingerprint='abcdefgh' - ) - - d = deployment.create() - assert deployment.current == d diff --git a/dm/tests/unit/test_dm_utils.py b/dm/tests/unit/test_dm_utils.py deleted file mode 100644 index 268ac94cfd9..00000000000 --- a/dm/tests/unit/test_dm_utils.py +++ /dev/null @@ -1,26 +0,0 @@ -from six import PY2 - -from apitools.base.py.exceptions import HttpNotFoundError -import pytest -from ruamel.yaml import YAML - -from cloud_foundation_toolkit.dm_utils import API -from cloud_foundation_toolkit.dm_utils import get_deployment - - -if PY2: - import mock -else: - import unittest.mock as mock - -class Message(): - def __init__(self, **kwargs): - 
[setattr(self, k, v) for k, v in kwargs.items()] - - -def test_get_deployment(): - with mock.patch.object(API.client.deployments, 'Get') as m: - m.side_effect = HttpNotFoundError('a', 'b', 'c') - d = get_deployment('some-deployment', 'some-project') - assert d is None - diff --git a/dm/tox.ini b/dm/tox.ini deleted file mode 100644 index 35660b8268a..00000000000 --- a/dm/tox.ini +++ /dev/null @@ -1,18 +0,0 @@ -[tox] -envlist = py27 - -[testenv] -setenv = - VIRTUAL_ENV={envdir} -deps = - -rrequirements/development.txt -usedevelop = True -whitelist_externals = - make - /bin/bash -commands = - {toxinidir}/tests/run-tests {posargs} - -[testenv:venv] -envdir = venv -commands = diff --git a/docs/meta/generate-index.py b/docs/meta/generate-index.py deleted file mode 100755 index 8d4b65d0d43..00000000000 --- a/docs/meta/generate-index.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import sys -import argparse -import yaml - -import requests -from collections import OrderedDict -from jinja2 import Environment, FileSystemLoader - -TERRAFORM_REGISTRY_BASE = "https://registry.terraform.io/v1/" - -class IndexItem(yaml.YAMLObject): - yaml_tag = u'!module' - - def __init__(self, data): - self.children = {} - self.data = data - - def name(self): - return self.data.get("name") - - def url(self): - if "source" in self.data: - return self.data.get("source") - path = self.data.get("path") - return f"{self.parent.url()}/tree/master/{path}" - - def should_display(self): - return not self.data.get('exclude', False) - - def description(self): - return self.data.get("description") - - def add_child_data(self, data): - child = self.add_child(IndexItem(data)) - child.data = {**child.data, **data} - return child - - def add_child(self, child): - if child.name() not in self.children: - self.children[child.name()] = child - child.parent = self - return self.children[child.name()] - - @classmethod - def to_yaml(cls, representer, node): - rep = OrderedDict() - rep_keys = ["name", "description", "source", "path", "exclude"] - for key in rep_keys: - if key in node.data: - rep[key] = node.data[key] - if len(node.children) >= 1: - rep["children"] = sorted(node.children.values(), key=lambda mod: mod.name()) - return representer.represent_mapping(cls.yaml_tag, rep) - - @classmethod - def from_yaml(cls, constructor, node): - data = constructor.construct_mapping(node, deep=True) - children = data.pop("children", []) - item = cls(data) - for child in children: - item.add_child(child) - return item - -def generate_index(root, org_name): - url = f"{TERRAFORM_REGISTRY_BASE}/modules/{org_name}" - r = requests.get(url, params={"limit": 100}) - data = r.json() - - for module in data.get("modules", []): - item = root.add_child_data(module) - id = module.get("id") - r = requests.get(f"{TERRAFORM_REGISTRY_BASE}/modules/{id}") - data = r.json() - for child in 
data.get("submodules", []): - item.add_child_data(child) - -def render_index(index, templates_dir, docs_dir): - env = Environment( - keep_trailing_newline=True, - loader=FileSystemLoader(templates_dir), - trim_blocks=True, - lstrip_blocks=True, - ) - templates = env.list_templates() - - for template_file in templates: - if not template_file.endswith(".tmpl"): - continue - output_file = os.path.basename(template_file.replace(".tmpl", "")) - - template = env.get_template(template_file) - modules = [mod for mod in index.children.values() if mod.should_display()] - rendered = template.render(modules=modules) - - with open(os.path.join(docs_dir, output_file), "w") as f: - f.write(rendered) - -def main(argv): - parser = argparser() - args = parser.parse_args(argv[1:]) - docs_dir = args.docs_dir - meta_dir = os.path.join(docs_dir, "meta") - - index_file = os.path.join(meta_dir, "index.yaml") - if os.path.isfile(index_file): - with open(index_file, "r") as f: - root = yaml.load(f, Loader=yaml.Loader) - else: - root = IndexItem({"name": "terraform"}) - - if not args.skip_refresh: - generate_index(root, "terraform-google-modules") - generate_index(root, "googlecloudplatform") - - with open(index_file, "w") as f: - yaml.dump(root, f) - - render_index(root, meta_dir, docs_dir) - -def argparser(): - parser = argparse.ArgumentParser(description='Generate index of blueprints') - parser.add_argument('docs_dir', nargs="?", default="docs/") - - parser.add_argument('--skip-refresh', default=False, action='store_true') - - return parser - -if __name__ == "__main__": - main(sys.argv) diff --git a/docs/meta/index.yaml b/docs/meta/index.yaml deleted file mode 100644 index 045dd95793c..00000000000 --- a/docs/meta/index.yaml +++ /dev/null @@ -1,697 +0,0 @@ -!module -children: -- !module - description: A Terraform module for managing Google Cloud IP addresses. 
- name: address - source: https://github.com/terraform-google-modules/terraform-google-address -- !module - children: - - !module - name: bastion-group - path: modules/bastion-group - - !module - name: iap-tunneling - path: modules/iap-tunneling - description: This module will generate a bastion host vm compatible with OS Login - and IAP Tunneling that can be used to access internal VMs. - name: bastion-host - source: https://github.com/terraform-google-modules/terraform-google-bastion-host -- !module - children: - - !module - name: authorization - path: modules/authorization - - !module - name: udf - path: modules/udf - description: This module allows you to create opinionated Google Cloud Platform - BigQuery datasets and tables. - name: bigquery - source: https://github.com/terraform-google-modules/terraform-google-bigquery -- !module - children: - - !module - name: cloudbuild - path: modules/cloudbuild - description: A module for bootstrapping Terraform usage in a new GCP organization. - name: bootstrap - source: https://github.com/terraform-google-modules/terraform-google-bootstrap -- !module - description: A Terraform module to help you to manage Google Cloud Datastore. - name: cloud-datastore - source: https://github.com/terraform-google-modules/terraform-google-cloud-datastore -- !module - description: This module makes it easy to create and manage Google Cloud DNS public - or private zones, and their records. - name: cloud-dns - source: https://github.com/terraform-google-modules/terraform-google-cloud-dns -- !module - description: This module handles opinionated Google Cloud Platform Cloud NAT creation - and configuration. - name: cloud-nat - source: https://github.com/terraform-google-modules/terraform-google-cloud-nat -- !module - children: - - !module - name: agent-policy - path: modules/agent-policy - description: This module is a collection of submodules related to Google Cloud Operations - (Logging and Monitoring). 
- name: cloud-operations - source: https://github.com/terraform-google-modules/terraform-google-cloud-operations -- !module - children: - - !module - name: interconnect_attachment - path: modules/interconnect_attachment - - !module - name: interface - path: modules/interface - description: Manage a Cloud Router on GCP - name: cloud-router - source: https://github.com/terraform-google-modules/terraform-google-cloud-router -- !module - description: Terraform Module for deploying apps to Cloud Run, along with option - to map custom domain - name: cloud-run - source: https://github.com/GoogleCloudPlatform/terraform-google-cloud-run -- !module - children: - - !module - name: simple_bucket - path: modules/simple_bucket - description: This module makes it easy to create one or more GCS buckets, and assign - basic permissions on them to arbitrary users. - name: cloud-storage - source: https://github.com/terraform-google-modules/terraform-google-cloud-storage -- !module - children: - - !module - name: create_environment - path: modules/create_environment - description: Terraform Module for managing Cloud Composer - name: composer - source: https://github.com/terraform-google-modules/terraform-google-composer -- !module - children: - - !module - name: cos-coredns - path: modules/cos-coredns - - !module - name: cos-generic - path: modules/cos-generic - - !module - name: cos-mysql - path: modules/cos-mysql - description: This module simplifies deploying containers on GCE instances. 
- name: container-vm - source: https://github.com/terraform-google-modules/terraform-google-container-vm -- !module - children: - - !module - name: dataproc_profile - path: modules/dataproc_profile - - !module - name: hub_artifact - path: modules/hub_artifact - - !module - name: instance - path: modules/instance - - !module - name: namespace - path: modules/namespace - - !module - name: private_network - path: modules/private_network - - !module - name: wait_healthy - path: modules/wait_healthy - description: '[ALPHA] Terraform module for managing Cloud Data Fusion' - name: data-fusion - source: https://github.com/terraform-google-modules/terraform-google-data-fusion -- !module - children: - - !module - name: dataflow_bucket - path: modules/dataflow_bucket - description: This module handles opiniated Dataflow job configuration and deployments. - name: dataflow - source: https://github.com/terraform-google-modules/terraform-google-dataflow -- !module - children: - - !module - name: iap_firewall - path: modules/iap_firewall - - !module - name: instance - path: modules/instance - - !module - name: template_files - path: modules/template_files - description: 'This module will create DataLab instances with support for GPU instances. 
' - name: datalab - source: https://github.com/terraform-google-modules/terraform-google-datalab -- !module - description: '' - name: endpoints-dns - source: https://github.com/terraform-google-modules/terraform-google-endpoints-dns -- !module - children: - - !module - name: event-folder-log-entry - path: modules/event-folder-log-entry - - !module - name: event-project-log-entry - path: modules/event-project-log-entry - - !module - name: repository-function - path: modules/repository-function - description: Terraform module for responding to logging events with a function - name: event-function - source: https://github.com/terraform-google-modules/terraform-google-event-function -- !module - description: This module helps create several folders under the same parent - name: folders - source: https://github.com/terraform-google-modules/terraform-google-folders -- !module - children: - - !module - name: client - path: modules/client - - !module - name: client_config - path: modules/client_config - - !module - name: client_gcs - path: modules/client_gcs - - !module - name: client_iam - path: modules/client_iam - - !module - name: cloudsql - path: modules/cloudsql - - !module - name: on_gke - path: modules/on_gke - - !module - name: real_time_enforcer - path: modules/real_time_enforcer - - !module - name: real_time_enforcer_organization_sink - path: modules/real_time_enforcer_organization_sink - - !module - name: real_time_enforcer_project_sink - path: modules/real_time_enforcer_project_sink - - !module - name: real_time_enforcer_roles - path: modules/real_time_enforcer_roles - - !module - name: rules - path: modules/rules - - !module - name: server - path: modules/server - - !module - name: server_config - path: modules/server_config - - !module - name: server_gcs - path: modules/server_gcs - - !module - name: server_iam - path: modules/server_iam - description: A Terraform module for installing Forseti on GCP - name: forseti - source: 
https://github.com/terraform-google-modules/terraform-google-forseti -- !module - children: - - !module - name: kubectl-wrapper - path: modules/kubectl-wrapper - description: A module for executing gcloud commands within Terraform. - name: gcloud - source: https://github.com/terraform-google-modules/terraform-google-gcloud -- !module - children: - - !module - name: gh-runner-gke - path: modules/gh-runner-gke - - !module - name: gh-runner-mig-container-vm - path: modules/gh-runner-mig-container-vm - - !module - name: gh-runner-mig-vm - path: modules/gh-runner-mig-vm - description: '[ALPHA] Module to create self-hosted GitHub Actions Runners on GCP' - name: github-actions-runners - source: https://github.com/terraform-google-modules/terraform-google-github-actions-runners -- !module - description: Installs GitLab on Kubernetes Engine - name: gke-gitlab - source: https://github.com/terraform-google-modules/terraform-google-gke-gitlab -- !module - description: A Terraform module for managing Google Groups - name: group - source: https://github.com/terraform-google-modules/terraform-google-group -- !module - description: '' - name: gsuite-export - source: https://github.com/terraform-google-modules/terraform-google-gsuite-export -- !module - description: This module handles opinionated Google Cloud Platform Healthcare datasets - and stores. 
- name: healthcare - source: https://github.com/terraform-google-modules/terraform-google-healthcare -- !module - children: - - !module - name: artifact_registry_iam - path: modules/artifact_registry_iam - - !module - name: audit_config - path: modules/audit_config - - !module - name: billing_accounts_iam - path: modules/billing_accounts_iam - - !module - name: custom_role_iam - path: modules/custom_role_iam - - !module - name: folders_iam - path: modules/folders_iam - - !module - name: helper - path: modules/helper - - !module - name: kms_crypto_keys_iam - path: modules/kms_crypto_keys_iam - - !module - name: kms_key_rings_iam - path: modules/kms_key_rings_iam - - !module - name: member_iam - path: modules/member_iam - - !module - name: organizations_iam - path: modules/organizations_iam - - !module - name: projects_iam - path: modules/projects_iam - - !module - name: pubsub_subscriptions_iam - path: modules/pubsub_subscriptions_iam - - !module - name: pubsub_topics_iam - path: modules/pubsub_topics_iam - - !module - name: secret_manager_iam - path: modules/secret_manager_iam - - !module - name: service_accounts_iam - path: modules/service_accounts_iam - - !module - name: storage_buckets_iam - path: modules/storage_buckets_iam - - !module - name: subnets_iam - path: modules/subnets_iam - description: This Terraform module makes it easier to non-destructively manage multiple - IAM roles for resources on Google Cloud Platform. - name: iam - source: https://github.com/terraform-google-modules/terraform-google-iam -- !module - children: - - !module - name: artifact_storage - path: modules/artifact_storage - description: '' - name: jenkins - source: https://github.com/terraform-google-modules/terraform-google-jenkins -- !module - description: Simple Cloud KMS module that allows managing a keyring, zero or more - keys in the keyring, and IAM role bindings on individual keys. 
- name: kms - source: https://github.com/terraform-google-modules/terraform-google-kms -- !module - children: - - !module - name: acm - path: modules/acm - - !module - name: asm - path: modules/asm - - !module - name: auth - path: modules/auth - - !module - name: beta-private-cluster - path: modules/beta-private-cluster - - !module - name: beta-private-cluster-update-variant - path: modules/beta-private-cluster-update-variant - - !module - name: beta-public-cluster - path: modules/beta-public-cluster - - !module - name: beta-public-cluster-update-variant - path: modules/beta-public-cluster-update-variant - - !module - name: binary-authorization - path: modules/binary-authorization - - !module - name: config-sync - path: modules/config-sync - - !module - name: hub - path: modules/hub - - !module - name: k8s-operator-crd-support - path: modules/k8s-operator-crd-support - - !module - name: private-cluster - path: modules/private-cluster - - !module - name: private-cluster-update-variant - path: modules/private-cluster-update-variant - - !module - name: safer-cluster - path: modules/safer-cluster - - !module - name: safer-cluster-update-variant - path: modules/safer-cluster-update-variant - - !module - name: services - path: modules/services - - !module - name: workload-identity - path: modules/workload-identity - description: A Terraform module for configuring GKE clusters. - name: kubernetes-engine - source: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine -- !module - description: Modular Regional TCP Load Balancer for GCE using target pool and forwarding - rule. - name: lb - source: https://github.com/GoogleCloudPlatform/terraform-google-lb -- !module - children: - - !module - name: dynamic_backends - path: modules/dynamic_backends - - !module - name: serverless_negs - path: modules/serverless_negs - description: Modular Global HTTP Load Balancer for GCE using forwarding rules. 
- name: lb-http - source: https://github.com/GoogleCloudPlatform/terraform-google-lb-http -- !module - description: Modular Internal Load Balancer for GCE using forwarding rules. - name: lb-internal - source: https://github.com/GoogleCloudPlatform/terraform-google-lb-internal -- !module - children: - - !module - name: bigquery - path: modules/bigquery - - !module - name: bq-log-alerting - path: modules/bq-log-alerting - - !module - name: pubsub - path: modules/pubsub - - !module - name: storage - path: modules/storage - description: This module allows you to create log exports at the project, folder, - or organization level. - name: log-export - source: https://github.com/terraform-google-modules/terraform-google-log-export -- !module - description: Modular Google Compute Engine managed instance group for Terraform. - exclude: true - name: managed-instance-group - source: https://github.com/GoogleCloudPlatform/terraform-google-managed-instance-group -- !module - children: - - !module - name: memcache - path: modules/memcache - description: A Terraform module for creating a fully functional Google Memorystore - (redis) instance. - name: memorystore - source: https://github.com/terraform-google-modules/terraform-google-memorystore -- !module - description: Modular NAT Gateway on Google Compute Engine for Terraform. 
- exclude: true - name: nat-gateway - source: https://github.com/GoogleCloudPlatform/terraform-google-nat-gateway -- !module - children: - - !module - name: fabric-net-firewall - path: modules/fabric-net-firewall - - !module - name: fabric-net-svpc-access - path: modules/fabric-net-svpc-access - - !module - name: firewall-rules - path: modules/firewall-rules - - !module - name: network-peering - path: modules/network-peering - - !module - name: routes - path: modules/routes - - !module - name: routes-beta - path: modules/routes-beta - - !module - name: subnets - path: modules/subnets - - !module - name: subnets-beta - path: modules/subnets-beta - - !module - name: vpc - path: modules/vpc - - !module - name: vpc-serverless-connector-beta - path: modules/vpc-serverless-connector-beta - description: A Terraform module that makes it easy to set up a new VPC Network in - GCP. - name: network - source: https://github.com/terraform-google-modules/terraform-google-network -- !module - children: - - !module - name: bucket_policy_only - path: modules/bucket_policy_only - - !module - name: domain_restricted_sharing - path: modules/domain_restricted_sharing - - !module - name: restrict_vm_external_ips - path: modules/restrict_vm_external_ips - - !module - name: skip_default_network - path: modules/skip_default_network - description: A Terraform module for managing GCP org policies. 
- name: org-policy - source: https://github.com/terraform-google-modules/terraform-google-org-policy -- !module - children: - - !module - name: app_engine - path: modules/app_engine - - !module - name: budget - path: modules/budget - - !module - name: core_project_factory - path: modules/core_project_factory - - !module - name: fabric-project - path: modules/fabric-project - - !module - name: gsuite_enabled - path: modules/gsuite_enabled - - !module - name: gsuite_group - path: modules/gsuite_group - - !module - name: project_services - path: modules/project_services - - !module - name: quota_manager - path: modules/quota_manager - - !module - name: shared_vpc - path: modules/shared_vpc - - !module - name: shared_vpc_access - path: modules/shared_vpc_access - - !module - name: svpc_service_project - path: modules/svpc_service_project - description: Opinionated Google Cloud Platform project creation and configuration - with Shared VPC, IAM, APIs, etc. - name: project-factory - source: https://github.com/terraform-google-modules/terraform-google-project-factory -- !module - children: - - !module - name: cloudiot - path: modules/cloudiot - description: This module makes it easy to create Google Cloud Pub/Sub topic and - subscriptions associated with the topic. - name: pubsub - source: https://github.com/terraform-google-modules/terraform-google-pubsub -- !module - children: - - !module - name: netweaver - path: modules/netweaver - - !module - name: sap_hana - path: modules/sap_hana - - !module - name: sap_hana_ha - path: modules/sap_hana_ha - - !module - name: sap_hana_python - path: modules/sap_hana/sap_hana_python - description: This module is a collection of multiple opinionated submodules to deploy - SAP Products. 
- name: sap - source: https://github.com/terraform-google-modules/terraform-google-sap -- !module - children: - - !module - name: project_cleanup - path: modules/project_cleanup - description: This modules makes it easy to set up a scheduled job to trigger events/run - functions. - name: scheduled-function - source: https://github.com/terraform-google-modules/terraform-google-scheduled-function -- !module - children: - - !module - name: gcs-object - path: modules/gcs-object - - !module - name: secret-infrastructure - path: modules/secret-infrastructure - description: '' - name: secret - source: https://github.com/terraform-google-modules/terraform-google-secret -- !module - children: - - !module - name: key-distributor - path: modules/key-distributor - description: This module allows easy creation of one or more service accounts, and - granting them basic roles. - name: service-accounts - source: https://github.com/terraform-google-modules/terraform-google-service-accounts -- !module - children: - - !module - name: slo - path: modules/slo - - !module - name: slo-native - path: modules/slo-native - - !module - name: slo-pipeline - path: modules/slo-pipeline - description: Create SLOs on GCP from custom Stackdriver metrics. Capability to export - SLOs to GCP services and other systems. - name: slo - source: https://github.com/terraform-google-modules/terraform-google-slo -- !module - children: - - !module - name: mssql - path: modules/mssql - - !module - name: mysql - path: modules/mysql - - !module - name: postgresql - path: modules/postgresql - - !module - name: private_service_access - path: modules/private_service_access - - !module - name: safer_mysql - path: modules/safer_mysql - description: Modular Cloud SQL database instance for Terraform. 
- name: sql-db - source: https://github.com/GoogleCloudPlatform/terraform-google-sql-db -- !module - description: A library of useful startup scripts to embed in VMs created by Terraform - name: startup-scripts - source: https://github.com/terraform-google-modules/terraform-google-startup-scripts -- !module - description: This module provides a way to get the shortnames for a given GCP region. - name: utils - source: https://github.com/terraform-google-modules/terraform-google-utils -- !module - children: - - !module - name: cluster - path: modules/cluster - description: Modular deployment of Vault on Google Compute Engine with Terraform - name: vault - source: https://github.com/terraform-google-modules/terraform-google-vault -- !module - children: - - !module - name: compute_disk_snapshot - path: modules/compute_disk_snapshot - - !module - name: compute_instance - path: modules/compute_instance - - !module - name: instance_template - path: modules/instance_template - - !module - name: mig - path: modules/mig - - !module - name: mig_with_percent - path: modules/mig_with_percent - - !module - name: preemptible_and_regular_instance_templates - path: modules/preemptible_and_regular_instance_templates - - !module - name: umig - path: modules/umig - description: This is a collection of opinionated submodules that can be used to - provision VMs in GCP. - name: vm - source: https://github.com/terraform-google-modules/terraform-google-vm -- !module - children: - - !module - name: access_level - path: modules/access_level - - !module - name: bridge_service_perimeter - path: modules/bridge_service_perimeter - - !module - name: regular_service_perimeter - path: modules/regular_service_perimeter - description: This module handles opinionated VPC Service Controls and Access Context - Manager configuration and deployments. 
- name: vpc-service-controls - source: https://github.com/terraform-google-modules/terraform-google-vpc-service-controls -- !module - children: - - !module - name: vpn_ha - path: modules/vpn_ha - description: A Terraform Module for setting up Google Cloud VPN - name: vpn - source: https://github.com/terraform-google-modules/terraform-google-vpn -name: terraform diff --git a/docs/meta/requirements.txt b/docs/meta/requirements.txt deleted file mode 100644 index 0d85a723f91..00000000000 --- a/docs/meta/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -Jinja2==2.11.3 -PyYAML==5.4 -requests==2.23.0 diff --git a/docs/meta/terraform.md.tmpl b/docs/meta/terraform.md.tmpl deleted file mode 100644 index 89e9c2ed726..00000000000 --- a/docs/meta/terraform.md.tmpl +++ /dev/null @@ -1,19 +0,0 @@ - - -# Terraform -The Cloud Foundation Toolkit includes over **{{ modules|length }}** Terraform modules. -There are also two end-to-end [examples](#examples) available. - -## Examples -These end-to-end examples are designed to be forked as a starting point: - -- [Example Foundation](https://github.com/terraform-google-modules/terraform-example-foundation) - Example repo showing how the CFT modules can be composed to build a secure cloud foundation. -- [CFT Fabric](https://github.com/terraform-google-modules/cloud-foundation-fabric) - This repo includes an advanced examples designed for prototyping. - -## Modules -{% for module in modules %} -* [{{ module.name() }}]({{ module.url() }}) - {{ module.description() }} -{% for child in module.children.values() %} - * [{{ child.name() }}]({{ child.url() }}) -{% endfor %} -{% endfor %} diff --git a/docs/terraform.md b/docs/terraform.md deleted file mode 100644 index 78af164ca9e..00000000000 --- a/docs/terraform.md +++ /dev/null @@ -1,205 +0,0 @@ - - -# Terraform -The Cloud Foundation Toolkit includes over **51** Terraform modules. -There are also two end-to-end [examples](#examples) available. 
- -## Examples -These end-to-end examples are designed to be forked as a starting point: - -- [Example Foundation](https://github.com/terraform-google-modules/terraform-example-foundation) - Example repo showing how the CFT modules can be composed to build a secure cloud foundation. -- [CFT Fabric](https://github.com/terraform-google-modules/cloud-foundation-fabric) - This repo includes an advanced examples designed for prototyping. - -## Modules -* [address](https://github.com/terraform-google-modules/terraform-google-address) - A Terraform module for managing Google Cloud IP addresses. -* [bastion-host](https://github.com/terraform-google-modules/terraform-google-bastion-host) - This module will generate a bastion host vm compatible with OS Login and IAP Tunneling that can be used to access internal VMs. - * [bastion-group](https://github.com/terraform-google-modules/terraform-google-bastion-host/tree/master/modules/bastion-group) - * [iap-tunneling](https://github.com/terraform-google-modules/terraform-google-bastion-host/tree/master/modules/iap-tunneling) -* [bigquery](https://github.com/terraform-google-modules/terraform-google-bigquery) - This module allows you to create opinionated Google Cloud Platform BigQuery datasets and tables. - * [authorization](https://github.com/terraform-google-modules/terraform-google-bigquery/tree/master/modules/authorization) - * [udf](https://github.com/terraform-google-modules/terraform-google-bigquery/tree/master/modules/udf) -* [bootstrap](https://github.com/terraform-google-modules/terraform-google-bootstrap) - A module for bootstrapping Terraform usage in a new GCP organization. - * [cloudbuild](https://github.com/terraform-google-modules/terraform-google-bootstrap/tree/master/modules/cloudbuild) -* [cloud-datastore](https://github.com/terraform-google-modules/terraform-google-cloud-datastore) - A Terraform module to help you to manage Google Cloud Datastore. 
-* [cloud-dns](https://github.com/terraform-google-modules/terraform-google-cloud-dns) - This module makes it easy to create and manage Google Cloud DNS public or private zones, and their records. -* [cloud-nat](https://github.com/terraform-google-modules/terraform-google-cloud-nat) - This module handles opinionated Google Cloud Platform Cloud NAT creation and configuration. -* [cloud-operations](https://github.com/terraform-google-modules/terraform-google-cloud-operations) - This module is a collection of submodules related to Google Cloud Operations (Logging and Monitoring). - * [agent-policy](https://github.com/terraform-google-modules/terraform-google-cloud-operations/tree/master/modules/agent-policy) -* [cloud-router](https://github.com/terraform-google-modules/terraform-google-cloud-router) - Manage a Cloud Router on GCP - * [interconnect_attachment](https://github.com/terraform-google-modules/terraform-google-cloud-router/tree/master/modules/interconnect_attachment) - * [interface](https://github.com/terraform-google-modules/terraform-google-cloud-router/tree/master/modules/interface) -* [cloud-run](https://github.com/GoogleCloudPlatform/terraform-google-cloud-run) - Terraform Module for deploying apps to Cloud Run, along with option to map custom domain -* [cloud-storage](https://github.com/terraform-google-modules/terraform-google-cloud-storage) - This module makes it easy to create one or more GCS buckets, and assign basic permissions on them to arbitrary users. 
- * [simple_bucket](https://github.com/terraform-google-modules/terraform-google-cloud-storage/tree/master/modules/simple_bucket) -* [composer](https://github.com/terraform-google-modules/terraform-google-composer) - Terraform Module for managing Cloud Composer - * [create_environment](https://github.com/terraform-google-modules/terraform-google-composer/tree/master/modules/create_environment) -* [container-vm](https://github.com/terraform-google-modules/terraform-google-container-vm) - This module simplifies deploying containers on GCE instances. - * [cos-coredns](https://github.com/terraform-google-modules/terraform-google-container-vm/tree/master/modules/cos-coredns) - * [cos-generic](https://github.com/terraform-google-modules/terraform-google-container-vm/tree/master/modules/cos-generic) - * [cos-mysql](https://github.com/terraform-google-modules/terraform-google-container-vm/tree/master/modules/cos-mysql) -* [data-fusion](https://github.com/terraform-google-modules/terraform-google-data-fusion) - [ALPHA] Terraform module for managing Cloud Data Fusion - * [dataproc_profile](https://github.com/terraform-google-modules/terraform-google-data-fusion/tree/master/modules/dataproc_profile) - * [hub_artifact](https://github.com/terraform-google-modules/terraform-google-data-fusion/tree/master/modules/hub_artifact) - * [instance](https://github.com/terraform-google-modules/terraform-google-data-fusion/tree/master/modules/instance) - * [namespace](https://github.com/terraform-google-modules/terraform-google-data-fusion/tree/master/modules/namespace) - * [private_network](https://github.com/terraform-google-modules/terraform-google-data-fusion/tree/master/modules/private_network) - * [wait_healthy](https://github.com/terraform-google-modules/terraform-google-data-fusion/tree/master/modules/wait_healthy) -* [dataflow](https://github.com/terraform-google-modules/terraform-google-dataflow) - This module handles opiniated Dataflow job configuration and deployments. 
- * [dataflow_bucket](https://github.com/terraform-google-modules/terraform-google-dataflow/tree/master/modules/dataflow_bucket) -* [datalab](https://github.com/terraform-google-modules/terraform-google-datalab) - This module will create DataLab instances with support for GPU instances. - * [iap_firewall](https://github.com/terraform-google-modules/terraform-google-datalab/tree/master/modules/iap_firewall) - * [instance](https://github.com/terraform-google-modules/terraform-google-datalab/tree/master/modules/instance) - * [template_files](https://github.com/terraform-google-modules/terraform-google-datalab/tree/master/modules/template_files) -* [endpoints-dns](https://github.com/terraform-google-modules/terraform-google-endpoints-dns) - -* [event-function](https://github.com/terraform-google-modules/terraform-google-event-function) - Terraform module for responding to logging events with a function - * [event-folder-log-entry](https://github.com/terraform-google-modules/terraform-google-event-function/tree/master/modules/event-folder-log-entry) - * [event-project-log-entry](https://github.com/terraform-google-modules/terraform-google-event-function/tree/master/modules/event-project-log-entry) - * [repository-function](https://github.com/terraform-google-modules/terraform-google-event-function/tree/master/modules/repository-function) -* [folders](https://github.com/terraform-google-modules/terraform-google-folders) - This module helps create several folders under the same parent -* [forseti](https://github.com/terraform-google-modules/terraform-google-forseti) - A Terraform module for installing Forseti on GCP - * [client](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/client) - * [client_config](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/client_config) - * [client_gcs](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/client_gcs) - * 
[client_iam](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/client_iam) - * [cloudsql](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/cloudsql) - * [on_gke](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/on_gke) - * [real_time_enforcer](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/real_time_enforcer) - * [real_time_enforcer_organization_sink](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/real_time_enforcer_organization_sink) - * [real_time_enforcer_project_sink](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/real_time_enforcer_project_sink) - * [real_time_enforcer_roles](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/real_time_enforcer_roles) - * [rules](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/rules) - * [server](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/server) - * [server_config](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/server_config) - * [server_gcs](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/server_gcs) - * [server_iam](https://github.com/terraform-google-modules/terraform-google-forseti/tree/master/modules/server_iam) -* [gcloud](https://github.com/terraform-google-modules/terraform-google-gcloud) - A module for executing gcloud commands within Terraform. 
- * [kubectl-wrapper](https://github.com/terraform-google-modules/terraform-google-gcloud/tree/master/modules/kubectl-wrapper) -* [github-actions-runners](https://github.com/terraform-google-modules/terraform-google-github-actions-runners) - [ALPHA] Module to create self-hosted GitHub Actions Runners on GCP - * [gh-runner-gke](https://github.com/terraform-google-modules/terraform-google-github-actions-runners/tree/master/modules/gh-runner-gke) - * [gh-runner-mig-container-vm](https://github.com/terraform-google-modules/terraform-google-github-actions-runners/tree/master/modules/gh-runner-mig-container-vm) - * [gh-runner-mig-vm](https://github.com/terraform-google-modules/terraform-google-github-actions-runners/tree/master/modules/gh-runner-mig-vm) -* [gke-gitlab](https://github.com/terraform-google-modules/terraform-google-gke-gitlab) - Installs GitLab on Kubernetes Engine -* [group](https://github.com/terraform-google-modules/terraform-google-group) - A Terraform module for managing Google Groups -* [gsuite-export](https://github.com/terraform-google-modules/terraform-google-gsuite-export) - -* [healthcare](https://github.com/terraform-google-modules/terraform-google-healthcare) - This module handles opinionated Google Cloud Platform Healthcare datasets and stores. -* [iam](https://github.com/terraform-google-modules/terraform-google-iam) - This Terraform module makes it easier to non-destructively manage multiple IAM roles for resources on Google Cloud Platform. 
- * [artifact_registry_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/artifact_registry_iam) - * [audit_config](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/audit_config) - * [billing_accounts_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/billing_accounts_iam) - * [custom_role_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/custom_role_iam) - * [folders_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/folders_iam) - * [helper](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/helper) - * [kms_crypto_keys_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/kms_crypto_keys_iam) - * [kms_key_rings_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/kms_key_rings_iam) - * [member_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/member_iam) - * [organizations_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/organizations_iam) - * [projects_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/projects_iam) - * [pubsub_subscriptions_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/pubsub_subscriptions_iam) - * [pubsub_topics_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/pubsub_topics_iam) - * [secret_manager_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/secret_manager_iam) - * [service_accounts_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/service_accounts_iam) - * 
[storage_buckets_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/storage_buckets_iam) - * [subnets_iam](https://github.com/terraform-google-modules/terraform-google-iam/tree/master/modules/subnets_iam) -* [jenkins](https://github.com/terraform-google-modules/terraform-google-jenkins) - - * [artifact_storage](https://github.com/terraform-google-modules/terraform-google-jenkins/tree/master/modules/artifact_storage) -* [kms](https://github.com/terraform-google-modules/terraform-google-kms) - Simple Cloud KMS module that allows managing a keyring, zero or more keys in the keyring, and IAM role bindings on individual keys. -* [kubernetes-engine](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine) - A Terraform module for configuring GKE clusters. - * [acm](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/acm) - * [asm](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/asm) - * [auth](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/auth) - * [beta-private-cluster](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/beta-private-cluster) - * [beta-private-cluster-update-variant](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/beta-private-cluster-update-variant) - * [beta-public-cluster](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/beta-public-cluster) - * [beta-public-cluster-update-variant](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/beta-public-cluster-update-variant) - * [binary-authorization](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/binary-authorization) - * 
[config-sync](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/config-sync) - * [hub](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/hub) - * [k8s-operator-crd-support](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/k8s-operator-crd-support) - * [private-cluster](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/private-cluster) - * [private-cluster-update-variant](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/private-cluster-update-variant) - * [safer-cluster](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/safer-cluster) - * [safer-cluster-update-variant](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/safer-cluster-update-variant) - * [services](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/services) - * [workload-identity](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/workload-identity) -* [lb](https://github.com/GoogleCloudPlatform/terraform-google-lb) - Modular Regional TCP Load Balancer for GCE using target pool and forwarding rule. -* [lb-http](https://github.com/GoogleCloudPlatform/terraform-google-lb-http) - Modular Global HTTP Load Balancer for GCE using forwarding rules. - * [dynamic_backends](https://github.com/GoogleCloudPlatform/terraform-google-lb-http/tree/master/modules/dynamic_backends) - * [serverless_negs](https://github.com/GoogleCloudPlatform/terraform-google-lb-http/tree/master/modules/serverless_negs) -* [lb-internal](https://github.com/GoogleCloudPlatform/terraform-google-lb-internal) - Modular Internal Load Balancer for GCE using forwarding rules. 
-* [log-export](https://github.com/terraform-google-modules/terraform-google-log-export) - This module allows you to create log exports at the project, folder, or organization level. - * [bigquery](https://github.com/terraform-google-modules/terraform-google-log-export/tree/master/modules/bigquery) - * [bq-log-alerting](https://github.com/terraform-google-modules/terraform-google-log-export/tree/master/modules/bq-log-alerting) - * [pubsub](https://github.com/terraform-google-modules/terraform-google-log-export/tree/master/modules/pubsub) - * [storage](https://github.com/terraform-google-modules/terraform-google-log-export/tree/master/modules/storage) -* [memorystore](https://github.com/terraform-google-modules/terraform-google-memorystore) - A Terraform module for creating a fully functional Google Memorystore (redis) instance. - * [memcache](https://github.com/terraform-google-modules/terraform-google-memorystore/tree/master/modules/memcache) -* [network](https://github.com/terraform-google-modules/terraform-google-network) - A Terraform module that makes it easy to set up a new VPC Network in GCP. 
- * [fabric-net-firewall](https://github.com/terraform-google-modules/terraform-google-network/tree/master/modules/fabric-net-firewall) - * [fabric-net-svpc-access](https://github.com/terraform-google-modules/terraform-google-network/tree/master/modules/fabric-net-svpc-access) - * [firewall-rules](https://github.com/terraform-google-modules/terraform-google-network/tree/master/modules/firewall-rules) - * [network-peering](https://github.com/terraform-google-modules/terraform-google-network/tree/master/modules/network-peering) - * [routes](https://github.com/terraform-google-modules/terraform-google-network/tree/master/modules/routes) - * [routes-beta](https://github.com/terraform-google-modules/terraform-google-network/tree/master/modules/routes-beta) - * [subnets](https://github.com/terraform-google-modules/terraform-google-network/tree/master/modules/subnets) - * [subnets-beta](https://github.com/terraform-google-modules/terraform-google-network/tree/master/modules/subnets-beta) - * [vpc](https://github.com/terraform-google-modules/terraform-google-network/tree/master/modules/vpc) - * [vpc-serverless-connector-beta](https://github.com/terraform-google-modules/terraform-google-network/tree/master/modules/vpc-serverless-connector-beta) -* [org-policy](https://github.com/terraform-google-modules/terraform-google-org-policy) - A Terraform module for managing GCP org policies. 
- * [bucket_policy_only](https://github.com/terraform-google-modules/terraform-google-org-policy/tree/master/modules/bucket_policy_only) - * [domain_restricted_sharing](https://github.com/terraform-google-modules/terraform-google-org-policy/tree/master/modules/domain_restricted_sharing) - * [restrict_vm_external_ips](https://github.com/terraform-google-modules/terraform-google-org-policy/tree/master/modules/restrict_vm_external_ips) - * [skip_default_network](https://github.com/terraform-google-modules/terraform-google-org-policy/tree/master/modules/skip_default_network) -* [project-factory](https://github.com/terraform-google-modules/terraform-google-project-factory) - Opinionated Google Cloud Platform project creation and configuration with Shared VPC, IAM, APIs, etc. - * [app_engine](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/app_engine) - * [budget](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/budget) - * [core_project_factory](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/core_project_factory) - * [fabric-project](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/fabric-project) - * [gsuite_enabled](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/gsuite_enabled) - * [gsuite_group](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/gsuite_group) - * [project_services](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/project_services) - * [quota_manager](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/quota_manager) - * [shared_vpc](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/shared_vpc) - * 
[shared_vpc_access](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/shared_vpc_access) - * [svpc_service_project](https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/svpc_service_project) -* [pubsub](https://github.com/terraform-google-modules/terraform-google-pubsub) - This module makes it easy to create Google Cloud Pub/Sub topic and subscriptions associated with the topic. - * [cloudiot](https://github.com/terraform-google-modules/terraform-google-pubsub/tree/master/modules/cloudiot) -* [sap](https://github.com/terraform-google-modules/terraform-google-sap) - This module is a collection of multiple opinionated submodules to deploy SAP Products. - * [netweaver](https://github.com/terraform-google-modules/terraform-google-sap/tree/master/modules/netweaver) - * [sap_hana](https://github.com/terraform-google-modules/terraform-google-sap/tree/master/modules/sap_hana) - * [sap_hana_ha](https://github.com/terraform-google-modules/terraform-google-sap/tree/master/modules/sap_hana_ha) - * [sap_hana_python](https://github.com/terraform-google-modules/terraform-google-sap/tree/master/modules/sap_hana/sap_hana_python) -* [scheduled-function](https://github.com/terraform-google-modules/terraform-google-scheduled-function) - This modules makes it easy to set up a scheduled job to trigger events/run functions. 
- * [project_cleanup](https://github.com/terraform-google-modules/terraform-google-scheduled-function/tree/master/modules/project_cleanup) -* [secret](https://github.com/terraform-google-modules/terraform-google-secret) - - * [gcs-object](https://github.com/terraform-google-modules/terraform-google-secret/tree/master/modules/gcs-object) - * [secret-infrastructure](https://github.com/terraform-google-modules/terraform-google-secret/tree/master/modules/secret-infrastructure) -* [service-accounts](https://github.com/terraform-google-modules/terraform-google-service-accounts) - This module allows easy creation of one or more service accounts, and granting them basic roles. - * [key-distributor](https://github.com/terraform-google-modules/terraform-google-service-accounts/tree/master/modules/key-distributor) -* [slo](https://github.com/terraform-google-modules/terraform-google-slo) - Create SLOs on GCP from custom Stackdriver metrics. Capability to export SLOs to GCP services and other systems. - * [slo](https://github.com/terraform-google-modules/terraform-google-slo/tree/master/modules/slo) - * [slo-native](https://github.com/terraform-google-modules/terraform-google-slo/tree/master/modules/slo-native) - * [slo-pipeline](https://github.com/terraform-google-modules/terraform-google-slo/tree/master/modules/slo-pipeline) -* [sql-db](https://github.com/GoogleCloudPlatform/terraform-google-sql-db) - Modular Cloud SQL database instance for Terraform. 
- * [mssql](https://github.com/GoogleCloudPlatform/terraform-google-sql-db/tree/master/modules/mssql) - * [mysql](https://github.com/GoogleCloudPlatform/terraform-google-sql-db/tree/master/modules/mysql) - * [postgresql](https://github.com/GoogleCloudPlatform/terraform-google-sql-db/tree/master/modules/postgresql) - * [private_service_access](https://github.com/GoogleCloudPlatform/terraform-google-sql-db/tree/master/modules/private_service_access) - * [safer_mysql](https://github.com/GoogleCloudPlatform/terraform-google-sql-db/tree/master/modules/safer_mysql) -* [startup-scripts](https://github.com/terraform-google-modules/terraform-google-startup-scripts) - A library of useful startup scripts to embed in VMs created by Terraform -* [utils](https://github.com/terraform-google-modules/terraform-google-utils) - This module provides a way to get the shortnames for a given GCP region. -* [vault](https://github.com/terraform-google-modules/terraform-google-vault) - Modular deployment of Vault on Google Compute Engine with Terraform - * [cluster](https://github.com/terraform-google-modules/terraform-google-vault/tree/master/modules/cluster) -* [vm](https://github.com/terraform-google-modules/terraform-google-vm) - This is a collection of opinionated submodules that can be used to provision VMs in GCP. 
- * [compute_disk_snapshot](https://github.com/terraform-google-modules/terraform-google-vm/tree/master/modules/compute_disk_snapshot) - * [compute_instance](https://github.com/terraform-google-modules/terraform-google-vm/tree/master/modules/compute_instance) - * [instance_template](https://github.com/terraform-google-modules/terraform-google-vm/tree/master/modules/instance_template) - * [mig](https://github.com/terraform-google-modules/terraform-google-vm/tree/master/modules/mig) - * [mig_with_percent](https://github.com/terraform-google-modules/terraform-google-vm/tree/master/modules/mig_with_percent) - * [preemptible_and_regular_instance_templates](https://github.com/terraform-google-modules/terraform-google-vm/tree/master/modules/preemptible_and_regular_instance_templates) - * [umig](https://github.com/terraform-google-modules/terraform-google-vm/tree/master/modules/umig) -* [vpc-service-controls](https://github.com/terraform-google-modules/terraform-google-vpc-service-controls) - This module handles opinionated VPC Service Controls and Access Context Manager configuration and deployments. 
- * [access_level](https://github.com/terraform-google-modules/terraform-google-vpc-service-controls/tree/master/modules/access_level) - * [bridge_service_perimeter](https://github.com/terraform-google-modules/terraform-google-vpc-service-controls/tree/master/modules/bridge_service_perimeter) - * [regular_service_perimeter](https://github.com/terraform-google-modules/terraform-google-vpc-service-controls/tree/master/modules/regular_service_perimeter) -* [vpn](https://github.com/terraform-google-modules/terraform-google-vpn) - A Terraform Module for setting up Google Cloud VPN - * [vpn_ha](https://github.com/terraform-google-modules/terraform-google-vpn/tree/master/modules/vpn_ha) diff --git a/go.work b/go.work new file mode 100644 index 00000000000..30e1e9246a4 --- /dev/null +++ b/go.work @@ -0,0 +1,9 @@ +go 1.23.0 + +use ( + ./cli + ./infra/blueprint-test + ./infra/module-swapper + ./infra/utils/fbf + ./tflint-ruleset-blueprint +) diff --git a/infra/.gitignore b/infra/.gitignore index 98b0c61ac6f..4a234a4417d 100644 --- a/infra/.gitignore +++ b/infra/.gitignore @@ -7,4 +7,5 @@ terraform.tfstate.backup credentials.json *.credentials.json concourse/fly -.idea/ \ No newline at end of file +.idea/ +**/*.tfvars diff --git a/infra/USAGE.md b/infra/USAGE.md index ec2e4628d9e..401f7cc4486 100644 --- a/infra/USAGE.md +++ b/infra/USAGE.md @@ -6,4 +6,3 @@ fly --target cft login --team-name cft \ --concourse-url https://concourse.infra.cft.tips ``` - diff --git a/infra/blueprint-test/.gitignore b/infra/blueprint-test/.gitignore index c69c16a7bf0..60c3f20a4c6 100644 --- a/infra/blueprint-test/.gitignore +++ b/infra/blueprint-test/.gitignore @@ -44,6 +44,7 @@ crash.log **/Gemfile.lock test/fixtures/shared/terraform.tfvars +test/setup/terraform.tfvars test/integration/gcloud/config.sh test/integration/tmp diff --git a/infra/blueprint-test/CHANGELOG.md b/infra/blueprint-test/CHANGELOG.md new file mode 100644 index 00000000000..dcee0072a6f --- /dev/null +++ 
b/infra/blueprint-test/CHANGELOG.md @@ -0,0 +1,401 @@ +# Changelog + +## [0.17.4](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.17.3...infra/blueprint-test/v0.17.4) (2025-01-02) + + +### Bug Fixes + +* **deps:** update module github.com/gruntwork-io/terratest to v0.48.1 ([#2764](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2764)) ([982c73e](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/982c73e9a357a84b2912e0b09b3d4f4cb17aae6a)) +* **deps:** Update module golang.org/x/net to v0.33.0 ([#2784](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2784)) ([b2655aa](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/b2655aa9f00800808cf0b9612ccf7f35fbec49c8)) + +## [0.17.3](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.17.2...infra/blueprint-test/v0.17.3) (2024-12-11) + + +### Bug Fixes + +* **deps:** update module github.com/gruntwork-io/terratest to v0.48.0 ([#2744](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2744)) ([92291c4](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/92291c4dbe97af64e0f4830aff1b00fca8ffa60a)) +* **gcloud:** support Run cmd with quotes ([#2748](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2748)) ([561befd](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/561befd0995cb2478319892cdbe7229b5b599021)) + +## [0.17.2](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.17.1...infra/blueprint-test/v0.17.2) (2024-12-09) + + +### Bug Fixes + +* JSONEq() whitespace backwards compatible ([#2737](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2737)) ([97fe43b](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/97fe43bea25762ce6c690cf2853f3faffeaf19de)) + +## 
[0.17.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.17.0...infra/blueprint-test/v0.17.1) (2024-12-06) + + +### Bug Fixes + +* **blueprint-test:** add goroutines max ([#2711](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2711)) ([97891bf](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/97891bf2fe45aaa09b8ff8bf4fc9b4210270a989)) +* **deps:** update github.com/hashicorp/terraform-config-inspect digest to c404f82 ([#2729](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2729)) ([dbedb97](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/dbedb97fc05238bc0ee39cb89f872fe8334ba160)) +* **deps:** update module github.com/stretchr/testify to v1.10.0 ([#2712](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2712)) ([a86225a](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/a86225a69c6afb8e0c03ef81f753c4f36f3f067b)) +* **deps:** update module golang.org/x/sync to v0.10.0 ([#2734](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2734)) ([00f6c05](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/00f6c05674b8316d29b3d1aee11ad3d12748b6aa)) +* JSONEq() handle json doc whitespace ([#2719](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2719)) ([0315e56](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/0315e563b2efb9bcb01361e2dff99921da53d866)) + +## [0.17.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.16.3...infra/blueprint-test/v0.17.0) (2024-11-21) + + +### Features + +* **blueprint-test:** add cai helper ([#2689](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2689)) ([054d1b0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/054d1b0efdee6bd145f2469ffa0ccbd1e8436aa6)) +* **blueprint-test:** add GetJSONPaths 
([#2705](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2705)) ([53eeec7](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/53eeec7a5bf6296d6b7eb5883dbb5743c32a259a)) +* **blueprint-test:** add JSONPathEqs() ([#2706](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2706)) ([3065ac7](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/3065ac7350c19e53ae082d6953407c5dd406e8c1)) +* **blueprint-test:** add ParseKubectlJSONResult ([#2688](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2688)) ([c46b613](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/c46b6131159f8d482d10c403a185c1084f84806a)) + + +### Bug Fixes + +* **deps:** update module golang.org/x/mod to v0.22.0 ([#2682](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2682)) ([3777591](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/3777591e661570f421d7e1d9feb374ad9c25d8f7)) + +## [0.16.3](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.16.2...infra/blueprint-test/v0.16.3) (2024-10-30) + + +### Bug Fixes + +* **deps:** update go modules ([#2646](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2646)) ([ccb8bda](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/ccb8bda55715c36b176d49427f2cbe3162483f94)) +* **deps:** update module github.com/gruntwork-io/terratest to v0.47.2 ([#2632](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2632)) ([a915f90](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/a915f90aee6a45c9f25fed19312279d2f73f09cc)) +* **deps:** update module github.com/hashicorp/terraform-json to v0.23.0 ([#2677](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2677)) 
([984d081](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/984d081b2095e45b1c81ecbc9d9cd07295fcb074)) +* **deps:** update module sigs.k8s.io/kustomize/kyaml to v0.18.0 ([#2629](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2629)) ([51dc21f](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/51dc21f17a512096e46834ece5e5f44a6cf6057e)) + +## [0.16.2](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.16.1...infra/blueprint-test/v0.16.2) (2024-09-05) + + +### Bug Fixes + +* **deps:** update github.com/hashicorp/terraform-config-inspect digest to 6714b46 ([#2488](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2488)) ([ca03f44](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/ca03f449f2ee9c928e702b8520e823cca83d445a)) +* **deps:** update module github.com/gruntwork-io/terratest to v0.47.1 ([#2578](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2578)) ([490d63b](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/490d63bf25395f71ad571cb7ade136663cafebef)) +* **deps:** update module github.com/tidwall/gjson to v1.17.3 ([#2474](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2474)) ([7fe2b66](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/7fe2b66489a4cf3be5d6451ca3177bfeb5c94249)) +* **deps:** update module golang.org/x/mod to v0.20.0 ([#2512](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2512)) ([13973f0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/13973f0a4aade9b6a1bb24210a9a0d7038416cae)) +* **deps:** update module golang.org/x/mod to v0.21.0 ([#2574](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2574)) ([e288615](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e288615361472de1940ca092388439864749cba1)) + +## 
[0.16.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.16.0...infra/blueprint-test/v0.16.1) (2024-07-23) + + +### Bug Fixes + +* **deps:** update go modules ([#2441](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2441)) ([8564e94](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/8564e948037cce59c7d2b1894cd1fb14af1fe1c4)) +* **deps:** update module github.com/gruntwork-io/terratest to v0.47.0 ([#2463](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2463)) ([d8c9a5d](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/d8c9a5d10468fbbe83f21490da7b9c4051ef69a0)) +* **deps:** update module sigs.k8s.io/kustomize/kyaml to v0.17.2 ([#2469](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2469)) ([db31f2e](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/db31f2e13291aa32b2d5f41fa58f36c5c4c3f226)) + +## [0.16.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.15.1...infra/blueprint-test/v0.16.0) (2024-06-28) + + +### Features + +* **tft:** add GetTFSetupJsonOutput() ([#2423](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2423)) ([1e7bc1b](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/1e7bc1b9c06f9da264226d9baba735636a05e82d)) +* **tft:** add WithParallelism() ([#2424](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2424)) ([7167d4e](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/7167d4e03b82a65db0bc94a4ad91ef42edb6cb2d)) + + +### Bug Fixes + +* **deps:** bump github.com/hashicorp/go-getter from 1.7.4 to 1.7.5 in /infra/blueprint-test ([#2434](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2434)) ([6b104e4](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/6b104e41844353a18524380786b92947665ec915)) +* **deps:** 
update dependency go to v1.22.4 ([#2412](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2412)) ([46b82dc](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/46b82dcf73fb1e5d9ea1399c67d08797bffbe0e0)) +* **deps:** update github.com/hashicorp/terraform-config-inspect digest to 271db41 ([#2411](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2411)) ([e1eb0d1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e1eb0d173381e72d4e5a00ed09121509a73b59fc)) +* **deps:** update module golang.org/x/mod to v0.18.0 ([#2403](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2403)) ([eaef7c8](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/eaef7c8711097c1b65bcdeb6356f1ee4140f13b8)) + +## [0.15.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.15.0...infra/blueprint-test/v0.15.1) (2024-05-24) + + +### Bug Fixes + +* **deps:** update module github.com/gruntwork-io/terratest to v0.46.15 ([#2374](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2374)) ([79e086f](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/79e086fd4ed37253c99b385c5a6d07746c82eefa)) +* **deps:** update module sigs.k8s.io/kustomize/kyaml to v0.17.1 ([#2372](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2372)) ([8210438](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/8210438fdb2ec2402b580469f4ebfa82572bca46)) +* **tft:** plumb GetJsonOutput() key ([#2385](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2385)) ([1b04124](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/1b041242842549ee0f01c9de1fa0ff55b42793bb)) + +## [0.15.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.14.1...infra/blueprint-test/v0.15.0) (2024-05-20) + + +### Features + +* **tft:** add 
GetJsonOutput() ([#2356](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2356)) ([1653110](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/1653110c06d0f0802bc7dc51f0d835a721520018)) +* **tft:** log TF version ([#2323](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2323)) ([2a2857c](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/2a2857c3cbe822a1364f6234776f77bfc14bc2ea)) + + +### Bug Fixes + +* **deps:** update module github.com/hashicorp/terraform-config-inspect to v0.0.0-20240509232506-4708120f8f30 ([#2344](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2344)) ([899a03c](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/899a03ceeef77e4a767bdda09496e39fbbc29f38)) +* **deps:** update module github.com/hashicorp/terraform-json to v0.22.0 ([#2359](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2359)) ([63536b5](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/63536b5979de707043ed890fe6d4d43d224480b3)) +* **deps:** update module github.com/hashicorp/terraform-json to v0.22.1 ([#2366](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2366)) ([f9684bb](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/f9684bbabece53d8744b2fb6fcc80778ff08f358)) + +## [0.14.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.14.0...infra/blueprint-test/v0.14.1) (2024-04-30) + + +### Bug Fixes + +* **deps:** bump github.com/hashicorp/go-getter from 1.7.2 to 1.7.4 in /infra/blueprint-test ([#2298](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2298)) ([aefdf42](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/aefdf42631fe029cc3fee30a050a531ea15d9493)) +* **deps:** bump golang.org/x/net from 0.17.0 to 0.23.0 in /infra/blueprint-test 
([#2272](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2272)) ([1dd1e00](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/1dd1e008953eac31f78d15d5c996e983508deb2a)) +* **deps:** update module github.com/gruntwork-io/terratest to v0.46.14 ([#2303](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2303)) ([d7bd472](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/d7bd472ca6e5179ce36b273addc03f7104c32cf4)) +* **deps:** update module github.com/hashicorp/terraform-json to v0.21.0 ([#2289](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2289)) ([77e37e6](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/77e37e65c4e7b72b97f79ca7fd714e2dc38bf6cc)) + +## [0.14.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.13.2...infra/blueprint-test/v0.14.0) (2024-04-09) + + +### Features + +* support plan assertions in blueprint test ([#2258](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2258)) ([bb29bbe](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/bb29bbef8137f9e084e3269f62bd174e50121281)) + + +### Bug Fixes + +* **deps:** update module golang.org/x/mod to v0.17.0 ([#2252](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2252)) ([6a2a1e1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/6a2a1e1d74b3029e489a106fe93d84cf6a42924e)) +* **deps:** update module sigs.k8s.io/kustomize/kyaml to v0.17.0 ([#2255](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2255)) ([f2e762e](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/f2e762eeb807b79b381f5a95f790f8c85a54d78e)) + +## [0.13.2](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.13.1...infra/blueprint-test/v0.13.2) (2024-03-21) + + +### Bug Fixes + +* **deps:** update module 
github.com/gruntwork-io/terratest to v0.46.12 ([#2216](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2216)) ([765f4bb](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/765f4bbd33b3d5752f94a471beb3262609ed1e62)) +* **deps:** update module github.com/gruntwork-io/terratest to v0.46.13 ([#2221](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2221)) ([b0e2940](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/b0e2940f4c45f3aa790bcf276d32ece681383c23)) +* **tft:** Correct message in DefaultVerify ([#2213](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2213)) ([2d67c60](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/2d67c60903880b2d9dacfdf8b34a53be4a20c6ed)) + +## [0.13.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.13.0...infra/blueprint-test/v0.13.1) (2024-03-14) + + +### Bug Fixes + +* **deps:** bump google.golang.org/protobuf from 1.31.0 to 1.33.0 in /infra/blueprint-test ([#2207](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2207)) ([43f002f](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/43f002fe3302fcbe3de487699354c76a42b03bce)) +* **deps:** update go modules ([#2195](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2195)) ([af8a62c](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/af8a62cc8c0db6fe00616921d1bed527253ebaa6)) +* **deps:** update module github.com/tidwall/gjson to v1.17.1 ([#2172](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2172)) ([3f3199e](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/3f3199e05baf84360fbb4a066b59cb0119ba1067)) + +## [0.13.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.12.1...infra/blueprint-test/v0.13.0) (2024-02-16) + + +### Features + +* **tft:** 
add GetStringOutputList ([#2159](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2159)) ([48810af](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/48810afccc60d8ad569c5a8ff202e3bb38425891)) + +## [0.12.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.12.0...infra/blueprint-test/v0.12.1) (2024-02-13) + + +### Bug Fixes + +* **tft:** parallel-safe a few edge cases ([#2145](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2145)) ([3ba8188](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/3ba81882a48222f7897f9cabbc4388b494ce3326)) + +## [0.12.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.11.1...infra/blueprint-test/v0.12.0) (2024-02-12) + + +### Features + +* **tft:** add filemutex for tft plugin cache ([#2140](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2140)) ([3568196](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/3568196e2517fb62479115f6f615c827a4734e60)) + + +### Bug Fixes + +* **deps:** update module github.com/gruntwork-io/terratest to v0.46.11 ([#2092](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2092)) ([daa1417](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/daa14179116ca3fb9263cb20fc19955b436ab7b5)) +* **deps:** update module github.com/gruntwork-io/terratest to v0.46.9 ([#2080](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2080)) ([da6b03f](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/da6b03f3e6eaabb8dbab8b21236cf1ffaffb8cd7)) +* **deps:** update module golang.org/x/mod to v0.15.0 ([#2131](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2131)) ([b0d5ff0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/b0d5ff035fbe4d78758f385d0c8f731c30715952)) + +## 
[0.11.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.11.0...infra/blueprint-test/v0.11.1) (2023-12-19) + + +### Bug Fixes + +* **deps:** bump golang.org/x/crypto from 0.14.0 to 0.17.0 in /infra/blueprint-test ([#2049](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2049)) ([bd2df12](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/bd2df128855540c3eba71b8690300d01d06c7bfc)) + +## [0.11.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.10.3...infra/blueprint-test/v0.11.0) (2023-12-13) + + +### Features + +* **tft:** skip logging sensitive setup outputs ([#2035](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2035)) ([775a50c](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/775a50ca6953196d62be223fd530ea1344410141)) +* update to GO 1.21 ([#2037](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2037)) ([e76ff55](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e76ff55afb5ee9c8c57b7b8a802acdab1ca15130)) + +## [0.10.3](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.10.2...infra/blueprint-test/v0.10.3) (2023-12-08) + + +### Bug Fixes + +* **deps:** update go modules ([#2018](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2018)) ([bca6c94](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/bca6c9483bfde81aa05da832d436908b6d40ab1d)) + +## [0.10.2](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.10.1...infra/blueprint-test/v0.10.2) (2023-12-05) + + +### Bug Fixes + +* add bq init step to create local config file ([#2000](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2000)) 
([ee8cc7c](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/ee8cc7c8eacd3ce5c00272259a2149279a28591b)) +* bump the all group in /infra/blueprint-test with 1 update ([#1965](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1965)) ([31e2ab3](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/31e2ab34fd45a8277ba0b41f98bb5a8c2b220f6c)) + +## [0.10.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.10.0...infra/blueprint-test/v0.10.1) (2023-11-08) + + +### Bug Fixes + +* bump the all group in /infra/blueprint-test with 2 updates ([#1952](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1952)) ([04d9184](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/04d9184146efb8d6b8b94d67a91e0d4e2f748220)) +* changing order in which args and commonOpts slices are combined ([#1943](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1943)) ([2dc6b44](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/2dc6b44462152b208646a7311d55d1bf4c9eb16a)) + +## [0.10.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.9.2...infra/blueprint-test/v0.10.0) (2023-11-01) + + +### Features + +* adding support for bq ([#1878](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1878)) ([d4b0385](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/d4b0385ba88ced63bbe2ba7cd84a7d759cc4d10c)) + +## [0.9.2](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.9.1...infra/blueprint-test/v0.9.2) (2023-10-25) + + +### Bug Fixes + +* bump google.golang.org/grpc from 1.58.0 to 1.58.3 in /infra/blueprint-test ([#1906](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1906)) 
([5af9071](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/5af907153307d9acc8cec009b468bdbe9a2a7bff)) +* **deps:** update go modules ([#1865](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1865)) ([df69583](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/df69583d3fdad5626e8cbf90f2c787428d064a48)) + +## [0.9.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.9.0...infra/blueprint-test/v0.9.1) (2023-10-17) + + +### Bug Fixes + +* **deps:** bump golang.org/x/net from 0.15.0 to 0.17.0 in /infra/blueprint-test ([#1874](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1874)) ([334a17e](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/334a17e81720b7235971b15ee26f6f6cbfb049c8)) + +## [0.9.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.8.1...infra/blueprint-test/v0.9.0) (2023-09-29) + + +### Features + +* allow user to specify/override outputs from the setup stage ([#1741](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1741)) ([8365efb](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/8365efbff6938727ec712a86ad19cd973a2f064d)) + +## [0.8.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.8.0...infra/blueprint-test/v0.8.1) (2023-09-08) + + +### Bug Fixes + +* update bpt go modules ([#1820](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1820)) ([5f35095](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/5f350959aa03b4f73116cbfa0e13a73ebf359ca3)) + +## [0.8.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.7.0...infra/blueprint-test/v0.8.0) (2023-08-17) + + +### Features + +* add utils.PollE ([#1729](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1729)) 
([e256d2f](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e256d2fdfb10a8a7f86a37145f3388df353cea13)) + + +### Bug Fixes + +* **deps:** update bpt go modules ([#1748](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1748)) ([f90deec](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/f90deec4e6a9728505acee667ae1083a8a270a84)) +* logging and TestKRMSimpleBlueprint timeout ([#1774](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1774)) ([5bc7a56](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/5bc7a564422144df44a5a488c33e6e7d7bb00de1)) + +## [0.7.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.6.1...infra/blueprint-test/v0.7.0) (2023-07-20) + + +### Features + +* Add HTTP Assert test helpers ([#1707](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1707)) ([9c423f9](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/9c423f910c14899eb311bf9b026439eb70378602)) +* add retry for kpt commands ([#1717](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1717)) ([55c9c8d](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/55c9c8dcf85b8eacdb8ff2c9d19582a445e192ab)) + +## [0.6.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.6.0...infra/blueprint-test/v0.6.1) (2023-06-27) + + +### Bug Fixes + +* blueprint-test tests ([#1675](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1675)) ([6ed5385](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/6ed538548fb9fd91a81663796efecb5e53c8a66e)) +* update bpt go modules ([#1677](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1677)) ([4c9aaec](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/4c9aaeca68db7198165d52227b3d03752d8f817d)) + +## 
[0.6.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.5.2...infra/blueprint-test/v0.6.0) (2023-06-13) + + +### Features + +* update to bpt GO 1.20 and rework krm test ([#1619](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1619)) ([50c2ab3](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/50c2ab3165ab5eb159a8569ec90cd1518d427b7c)) + +## [0.5.2](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.5.1...infra/blueprint-test/v0.5.2) (2023-05-11) + + +### Bug Fixes + +* bump GO modules and address lint ([#1541](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1541)) ([6b76dc1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/6b76dc17db4e64a6aff52b980d5c3ac01b2a901a)) + +## [0.5.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.5.0...infra/blueprint-test/v0.5.1) (2023-03-20) + + +### Bug Fixes + +* kpt tests without existing working dir ([#1447](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1447)) ([c9cc7af](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/c9cc7af901d8ff6c100358c540eb9eea5f8015a4)) + +## [0.5.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.4.1...infra/blueprint-test/v0.5.0) (2023-02-28) + + +### Features + +* update blueprint-test to GO 1.18 and test fixes ([#1373](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1373)) ([0234ad6](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/0234ad6f0da169aec58a9fd848094907aa4b6851)) + + +### Bug Fixes + +* **deps:** update go modules ([#1416](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1416)) ([5f01e1f](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/5f01e1ffd04d9ad47bf7bdb28c92716028d1977f)) +* 
update blueprint-test for kpt v1.0.0-beta.16+ ([#1367](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1367)) ([3613491](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/36134916e2fd859b0aea4384c1b4a5ab79d65eac)) + +## [0.4.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.4.0...infra/blueprint-test/v0.4.1) (2023-01-10) + + +### Bug Fixes + +* **deps:** update for go-sdk refactor ([#1217](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1217)) ([5c50728](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/5c50728b825fda6187ca9b73151741c733e623ec)) +* remove terraform plan file needed for the terraform vet execution after validation ([#1321](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1321)) ([1edc5df](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/1edc5df7267c78a917eec0f2b5ad3f4024ca5e98)) + +## [0.4.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.3.0...infra/blueprint-test/v0.4.0) (2022-12-02) + + +### Features + +* allow var overrides for workspace mode ([#1292](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1292)) ([f6ffa1f](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/f6ffa1f60039f03a6fb77e122894641caa739fef)) +* enable no color ([#1293](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1293)) ([06fae23](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/06fae232e1f97b1d78df6809eff65898fddb5268)) +* new test strategy for redeploy validation ([#1286](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1286)) ([de5d509](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/de5d5090980f5f12e0321365a935e119493518ec)) + +## 
[0.3.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.2.0...infra/blueprint-test/v0.3.0) (2022-08-30) + + +### Features + +* add project ID param to tft vet ([#1226](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1226)) ([e95dc64](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e95dc64d9b4596135cfc8bac481402c739e1c6a4)) +* blueprint-test file logger ([a284f16](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/a284f164fccc58db86dfb8999b8013642e5d2bd7)) + +## [0.2.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.1.0...infra/blueprint-test/v0.2.0) (2022-08-03) + + +### Features + +* add support for retryable tf errors ([#1198](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1198)) ([bcf67d6](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/bcf67d6d5aa193077c961c529f14df56e80b9e7a)) +* Add support for terraform vet in blueprint test ([#1191](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1191)) ([e3179df](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e3179dfc63abf2fd7cf291b24531abbe3cba02ff)) +* expose setup outputs via tft ([#1203](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1203)) ([4ea786f](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/4ea786f947dfa58799f5e4736e511ca59668958b)) + +## [0.1.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test/v0.0.1...infra/blueprint-test/v0.1.0) (2022-06-13) + + +### Features + +* add support for backend configuration tf blueprint test ([#1165](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1165)) ([442b49e](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/442b49ebe347d2415840967200d280bdf590cbe1)) + +## 
[0.0.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/blueprint-test-v0.0.1...infra/blueprint-test/v0.0.1) (2022-06-07) + + +### Features + +* add getter for krmt build directory ([#1106](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1106)) ([fd68a6b](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/fd68a6bdc9a90d0f340fdad80bfcfc8119137a0f)) +* add golden string sanitizer option ([#1109](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1109)) ([0e962c6](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/0e962c6ff0f5fa4f38cab62c31e78bd67d5923de)) +* add goldenfile sanitizers ([#1074](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1074)) ([c98be35](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/c98be3561409b0051a1a5b2502eb603766d2c4a5)) +* add helper for goldenfiles ([#1067](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1067)) ([1bf5397](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/1bf53970d457786fa2b3dc79a42b77887d1c7bb5)) +* add KRM benchmark example ([#982](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/982)) ([6854aa0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/6854aa08ed6f5edeba8884aec1d89745d1f64a2b)) +* add support for runf in gcloud blueprint test ([#1070](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1070)) ([3842083](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/3842083683a3218d7864efa5e545dc4958cc3ecb)) +* add test result utils ([#1005](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1005)) ([608c349](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/608c349bf8e4b68bb1f211094de5e8c91f881521)) +* add test yaml config 
([#986](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/986)) ([fe03487](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/fe034876e2e780bce0906252026115c851580f7a)) +* add the first draft of the user guide for the test framework ([#983](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/983)) ([5dcd154](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/5dcd1546f9d5ec5ab39743e5181feffb8877a1ea)) +* add transform func for removing a json value by path ([#1110](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1110)) ([9f9a444](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/9f9a444009fd9a35d074efff5803d2a2fb8572e8)) +* adds logic for copying additional resources to the build directory ([#1118](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1118)) ([8383c92](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/8383c92c8a54322eebca8a058550b46396f043aa)) +* **cli:** Allow customization of setup sa key ([#1065](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1065)) ([7c9f83c](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/7c9f83caf2fe77b69dd4af8bef6c3496b14d3af2)) +* export GetTFOptions method for tft ([#1003](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1003)) ([5e783cf](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/5e783cf7c716104ad113064d9e1b9aea4dc7a999)) +* initialize KRM blueprint testing ([#977](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/977)) ([2953e46](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/2953e46e28f4085c733243e3a4914f52aa105f2e)) +* initialize terraform blueprint testing ([#945](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/945)) 
([723b19c](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/723b19ce02d0e04e1f12f117aa2fe9ba44cad5e5)) +* remove list-setter dep for kpt ([#1088](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1088)) ([bad09af](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/bad09af2b45598ca08990d3bcb722560b661b3e0)) +* support complex type setup outputs ([#997](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/997)) ([39b4ef0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/39b4ef08bb23b352ea6ff7073f942ec0b5a50fc7)) +* **tft:** activate creds from setup ([#1062](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1062)) ([08c972c](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/08c972c3768cae717df3f33a43785bf21b183a13)) + + +### Bug Fixes + +* **bptest:** compute config discovery from int test dir ([#1025](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1025)) ([bea525b](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/bea525b1cf5203f522bd0e9f42bce45605885c41)) +* bumb the gjson version to the latestg ([#1011](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1011)) ([2c665e7](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/2c665e7bd84a189f225fbbf42f4ea5d0b69fa42a)) +* **krm-test:** add option for custom setters ([#981](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/981)) ([78afb5d](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/78afb5d4cfd83922d82980a6493bd0a7dab78e12)) +* mark tests as skipped due to test config ([#1063](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1063)) ([0687139](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/068713996f3641114bf1fed1937d4cec09ddc3f5)) +* recognize prow PR commit 
([#993](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/993)) ([e8c47de](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e8c47de6a66b1dde620da57ecd752f59de32b7f4)) +* upgrades kpt-sdk dependency to remove the gakekeeper lib reference ([#1090](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1090)) ([727d5c1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/727d5c1b1fafbd45ddfaea7cad99da379891fc6e)) diff --git a/infra/blueprint-test/Makefile b/infra/blueprint-test/Makefile new file mode 100644 index 00000000000..60703bc97cb --- /dev/null +++ b/infra/blueprint-test/Makefile @@ -0,0 +1,6 @@ +SHELL := /bin/bash +GOLANGCI_VERSION := 1.63.3 + +.PHONY: docker_go_lint +docker_go_lint: + docker run --rm -v $(PWD):/blueprint-test -w /blueprint-test golangci/golangci-lint:v${GOLANGCI_VERSION} golangci-lint --timeout=5m -v run diff --git a/infra/blueprint-test/README.md b/infra/blueprint-test/README.md index b1c9bd14b8d..b58ea77573c 100644 --- a/infra/blueprint-test/README.md +++ b/infra/blueprint-test/README.md @@ -9,14 +9,14 @@ Apart from the necessity of including a testing framework as part of our GCP blu Considering the above, our test framework has been developed (details in the following sections) with backward compatibility to allow for current tests to keep functioning. -*Note: If you have a question about the test framework, feel free to ask it on our user group. +> [!NOTE] +> If you have a question about the test framework, feel free to ask it on our user group. Feature requests can also be submitted as Issues.* # 2. Framework Concepts -``` -Note: The best reference documentation for the framework is the [autogenerated documentation](https://pkg.go.dev/github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test). 
-``` +> [!NOTE] +> The best reference documentation for the framework is the [autogenerated documentation](https://pkg.go.dev/github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test). The test framework uses Golang with the testing package at its core to allow for creation and setup of GCP blueprints integration tests. @@ -74,7 +74,7 @@ This section aims at explaining the process of developing a custom integration t ## 3.1 Terraform based blueprints -### 3.1.1 Prepare a test example +### 3.1.1 Prepare a test example The first step in the process is to create an example that leverages a TF module from the blueprint as illustrated in section 2.1. The example creation process consists of two steps: #### 3.1.1.1 Create the example configuration @@ -85,8 +85,8 @@ In this step you will create an example directory under the `examples` directory ```hcl // name for your example module -module "mysql-db" { - // set the source for the module being tested as part of the +module "mysql-db" { + // set the source for the module being tested as part of the // example source = "../../modules/mysql" ... @@ -101,7 +101,7 @@ variable "project_id" { description = "The ID of the project in which resources will be provisioned." type = string } - + variable "db_name" { description = "The name of the SQL Database instance" default = "example-mysql-public" @@ -113,31 +113,27 @@ These variables are now available for access within the `main.tf` file and can b ```hcl // name for your example module -module "mysql-db" { +module "mysql-db" { ... // variables required by the source module random_instance_name = true database_version = "MYSQL_5_6" - - // variable being set from the example module variables configuration. + + // variable being set from the example module variables configuration. project_id = var.project_id ... 
} ``` -``` -Note: -Variables defined in the example module’s variables (normally variables.tf) configuration can be set 1) from -wrapping modules calling the example module (e.g. fixtures) or 2) using environment variables by prefixing -the environment variable with “TF_VAR”. - -E.g. to set the project_id variable (above), setting the value in a “TF_VAR_project_id” environment variable -would automatically populate its value upon execution. This is illustrated the file -`test/setup/simple_tf_module/outputs.tf` where the `project_id` is being exported as an env variable. -``` +> [!NOTE] +> Variables defined in the example module’s variables (normally variables.tf) configuration can be set 1) from wrapping modules calling the example module (e.g. fixtures) or 2) using environment variables by prefixing the environment variable with `TF_VAR`. +> +> E.g. to set the project_id variable (above), setting the value in a `TF_VAR_project_id` environment variable +> would automatically populate its value upon execution. This is illustrated in the file +`test/setup/outputs.tf` where the `project_id` is being exported as an env variable. #### 3.1.1.2 Output variables for the test -Upon successful execution of your example module, you will most likely need outputs for resources being provisioned to validate and assert in your test. This is done using outputs in Terraform. +Upon successful execution of your example module, you will most likely need outputs for resources being provisioned to validate and assert in your test. 1. In the `examples/mysql-public` directory create a file `outputs.tf`. The content for the file should be as follows: ```hcl @@ -154,29 +150,29 @@ Complete code files for the example module can be found [here](https://github.co ### 3.1.2 Write an integration test After creating the example configuration, your example will automatically be tested and no further action is required. 
-However, if you need to make custom assertions regarding the resources the blueprint will create, you should create an integration test in Go using the (testing)[https://pkg.go.dev/testing] package. Custom assertions will mostly involve making an API calls to GCP (via gcloud commands) to assert a resource is configured as expected. +However, if you need to make custom assertions regarding the resources the blueprint will create, you should create an integration test in Go using the [testing](https://pkg.go.dev/testing) package. Custom assertions will mostly involve making an API calls to GCP (via gcloud commands) to assert a resource is configured as expected. The entire integration test explained below can be found [here](https://github.com/terraform-google-modules/terraform-google-sql-db/blob/master/test/integration/mysql-public/mysql_public_test.go). The first step in writing the test is to wire it up with the required packages and methods signatures that the test framework expects as follows: 1. Cd in the `test/integration/mysql-public` directory or create it if it's not present already in the blueprint and the cd into it. -2. Create file in `mysql_public_test.go` with the following content: +2. Create file in `mysql_public_test.go` with the following content: As a good practice use this convention to name your test files: _test.go - + ```go // define test package name package mysql_public - + import ( "fmt" "testing" - + // import the blueprints test framework modules for testing and assertions - "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" "github.com/stretchr/testify/assert" ) - + // name the function as Test* func TestMySqlPublicModule(t *testing.T) { ... 
@@ -185,9 +181,9 @@ func TestMySqlPublicModule(t *testing.T) { // define and write a custom verifier for this test case call the default verify for confirming no additional changes mySqlT.DefineVerify(func(assert *assert.Assertions) { // perform default verification ensuring Terraform reports no additional changes on an applied blueprint - mySqlT.DefaultVerify(assert) + mySqlT.DefaultVerify(assert) // custom logic for the test continues below - ... + ... }) // call the test function to execute the integration test mySqlT.Test() @@ -196,14 +192,14 @@ func TestMySqlPublicModule(t *testing.T) { The next step in the process is to write the logic for assertions. -1. In most cases, you will be asserting against values retrieved from the GCP environment. This can be done by using the (gcloud)[https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/blueprint-test/pkg/gcloud/gcloud.go] helper in our test framework, which executes gcloud commands and stores their JSON outputs as. The gcloud helper can be initialized as follows: +1. In most cases, you will be asserting against values retrieved from the GCP environment. This can be done by using the [gcloud](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/blueprint-test/pkg/gcloud/gcloud.go) helper in our test framework, which executes gcloud commands and stores their JSON outputs as. The gcloud helper can be initialized as follows: ```go // The tft struct can be used to pull output variables of the TF module being invoked by this test op := gcloud.Run(t, fmt.Sprintf("sql instances describe %s --project %s", mySqlT.GetStringOutput("name"), mySqlT.GetStringOutput("project_id"))) ``` -2. Once you have retrieved values from GCP, use the (assert)[https://pkg.go.dev/github.com/stretchr/testify/assert] package to perform custom validations with respect to the resources provisioned. Here are some common assertions that can be useful in most test scenarios. +2. 
Once you have retrieved values from GCP, use the [assert](https://pkg.go.dev/github.com/stretchr/testify/assert) package to perform custom validations with respect to the resources provisioned. Here are some common assertions that can be useful in most test scenarios. 1. Equal @@ -211,7 +207,7 @@ op := gcloud.Run(t, fmt.Sprintf("sql instances describe %s --project %s", mySqlT // assert values that are supposed to be equal to the expected values assert.Equal(databaseVersion, op.Get("databaseVersion").String(), "database versions is valid is set to "+databaseVersion) ``` - + 2. Contains ```go @@ -255,16 +251,16 @@ By default, tests go through 4 stages above. You can also explicitly run individ ## 4.1 Test prerequisites In order for the test to execute, certain prerequisite resources and components need to be in place. These can be set up using the TF modules under `test/setup`. Running `terraform apply` in this directory will set up all resources required for the test. -``` -Note: Output values from `test/setup` are automatically loaded as Terraform environment variables and are available to both auto discovered and custom/explicit tests as inputs. This is also illustrated in the (Create the example configuration - Step 4)[https://github.com/g-awmalik/cloud-foundation-toolkit/tree/feat/add-bp-test-doc/infra/blueprint-test#3111-create-the-example-configuration] above where the `project_id` variable output by the `test/setup` is consumed as a variable for the example. -``` +> [!NOTE] +> Output values from `test/setup` are automatically loaded as Terraform environment variables and are available to both auto discovered and custom/explicit tests as inputs. 
This is also illustrated in the [Create the example configuration - Step 4](https://github.com/g-awmalik/cloud-foundation-toolkit/tree/feat/add-bp-test-doc/infra/blueprint-test#3111-create-the-example-configuration) above where the `project_id` variable output by the `test/setup` is consumed as a variable for the example. + ## 4.2 Default and stage specific execution 1. Cd into the `test/integration` directory 2. Run one following command(s) that will run all tests by default going through all 4 stages: - `go test` OR - `go test -v` (for verbose output) -3. To run the tests for a specific stage, use the following format/command to run the test & stage: +3. To run the tests for a specific stage, use the following format/command to run the test & stage: - `RUN_STAGE= go test` E.g. to run a test for just the init stage the use the following command: - `RUN_STAGE=init go test` @@ -275,7 +271,7 @@ All blueprints come pre-wired with an auto-discovered test located in the `test/ ```go package test -import ( +import ( // should be imported to enable testing for GO modules "testing" @@ -293,7 +289,7 @@ func TestAll(t *testing.T) { We’ll use the blueprint structure highlighted in section 2.1 for explaining how auto-discovered test execution works. The auto-discovered test can be triggered as follow: -- cd into test/integration and run: +- cd into test/integration and run: - `go test` OR - `go test -v` (for verbose output) @@ -314,27 +310,27 @@ This section shows the execution for auto-discovered tests and the output illust 2. Beginning of the `init` stage ![2](https://user-images.githubusercontent.com/21246369/131234078-674e5e64-18fa-448c-a7ff-0ae87587adec.jpg) - + This illustrates the start of `init` stage of the test execution. At this point TF init and plan is applied on the `mysql-private` example - + 3. 
Beginning of `apply` stage ![3](https://user-images.githubusercontent.com/21246369/131234092-8c095158-5262-47b2-8137-41d20a78d5d2.jpg) - + This illustrates the execution of the `apply` stage and also shows the simulated FAIL scenario where an output variable is not configured as “sensitive”. At this point, the test will be marked as failed. 4. Beginning of `verify` stage - ``` - Note: this illustration is from the 2nd test case (mssql-public) that made it through the apply stage successfully. - ``` +> [!NOTE] +> this illustration is from the 2nd test case (mssql-public) that made it through the apply stage successfully. + ![4](https://user-images.githubusercontent.com/21246369/131234120-6a3bdb62-0437-4d93-a1b7-2ef7b0191d40.jpg) This illustrates the execution of the verify stage where TF plan is executed to refresh the TF state and confirm no permadiffs occur and all resources were successfully provisioned. 5. Beginning of `destroy` stage ![5](https://user-images.githubusercontent.com/21246369/131234147-b0235cc3-9f5f-4c19-892f-1b4fc6cc12f4.jpg) - - This illustrates the execution of the `destroy` stage where `terraform destroy` is executed to teardown all resources provisioned by the example. + + This illustrates the execution of the `destroy` stage where `terraform destroy` is executed to teardown all resources provisioned by the example. Lastly, a status of the complete test run is shown with a tally of all passed and failed tests and eventually showing the overall status of the test run which is FAIL in this case. ## 4.4 Custom tests @@ -342,35 +338,35 @@ Unlike auto-discovered tests, custom tests are written specifically for examples 1. Cd into `test/integration` Instead of running the whole test suite, we will target our custom test by name i.e. `TestMySqlPublicModule` in file `test/integration/mysql-public/mysql_public_test.go` - + 2. 
Run the one of the following commands for execution: - `go test -run TestMySqlPublicModule ./...` OR - `go test -run TestMySqlPublicModule ./... -v` (for verbose output) In the above commands the test module name is specified with the `-run` parameter. This name can also be in the form of a regular expression as explained in the tip below. The usage of `./â€Ļ` in the above commands allows for golang to execute tests in subdirectories as well. - + ``` Tip: Targeting Specific Tests - + Apart from running tests by default, specific or all tests can be targeted using RegEx expressions. - + To run all tests regardless if they are custom or auto-discovered, use the following command: `go test -v ./... -p 1 .` - + To run a specific test or a set of tests using a regular expression, use the following command: `go test -run TestAll/*` - will target all tests that are supposed to be invoked as part of the auto-discovery process. `go test -run MySql ./...` - will target all tests that are written for MySql i.e. have ‘MySql’ as part of their test module name. - + Furthermore, to run a specific stage of a test or a set of tests, set the RUN_STAGE environment variable: This command specifically runs only the setup stage for all tests that are auto-discovered `RUN_STAGE=setup go test -run TestAll/* .` ``` - + ### 4.4.1. Custom test stage specific execution -By default, a custom test also goes through 4 stages as auto-discovered tests do. However, depending on the custom test configuration, there can be additional test logic that is executed in one or more stages of the test(s). +By default, a custom test also goes through 4 stages as auto-discovered tests do. However, depending on the custom test configuration, there can be additional test logic that is executed in one or more stages of the test(s). 
-E.g., in order to run only the `verify` stage for a custom test, run one of the following command: +E.g., in order to run only the `verify` stage for a custom test, run one of the following command: ``` RUN_STAGE=verify go test -run TestMySqlPublicModule ./... @@ -383,10 +379,10 @@ Here, the custom assertion failed since the expected region and zone configured # 5. Appendix -## 5.1 Advanced Topic +## 5.1 Advanced Topics ### 5.1.1 Terraform Fixtures -Fixtures can also be used to test similar examples and modules when the only thing changing is the data. The following example illustrates the usage of the `examples/mysql-public` as the source and passing in the data required to execute the test. +Fixtures can also be used to test similar examples and modules when the only thing changing is the data. The following example illustrates the usage of the `examples/mysql-public` as the source and passing in the data required to execute the test. 1. Cd into `test/fixtures` directory 2. Create a directory `mysql-public` and cd into it @@ -405,3 +401,17 @@ module "mysql-fixture" { Similar to the example module, outputs can be configured for the fixture module as well, especially for the generated values that need to be asserted in the test. Complete code files for the fixture module can be found [here](https://github.com/terraform-google-modules/terraform-google-sql-db/tree/master/test/fixtures/mysql-public). + +### 5.1.2 Plan Assertions + +The `plan` stage can be used to perform additional assertions on planfiles. This can be useful for scenarios where additional validation is useful to fail fast before proceeding to more expensive stages like `apply`, or smoke testing configuration without performing an `apply` at all. + +Currently a default plan function does not exist and cannot be used with auto generated tests. Plan stage can be activated by providing a custom plan function. 
Plan function receives a parsed `PlanStruct` which contains the [raw TF plan JSON representation](https://www.terraform.io/docs/internals/json-format.html#plan-representation) as well as some additional processed data like map of resource changes. + +```go +networkBlueprint.DefinePlan(func(ps *terraform.PlanStruct, assert *assert.Assertions) { + ... + }) +``` + +Additionally, the `TFBlueprintTest` also exposes a `PlanAndShow` method which can be used to perform ad-hoc plans (for example in `verify` stage). diff --git a/infra/blueprint-test/build/int.cloudbuild.yaml b/infra/blueprint-test/build/int.cloudbuild.yaml new file mode 100644 index 00000000000..d80eb51f023 --- /dev/null +++ b/infra/blueprint-test/build/int.cloudbuild.yaml @@ -0,0 +1,39 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +steps: +- id: prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && prepare_environment'] + dir: 'infra/blueprint-test' +- id: pkg-test + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'go test ./... 
-v'] + dir: 'infra/blueprint-test/pkg' +- id: test + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'git config --global user.name "cft-test"; git config --global user.email "<>"; go test -v -timeout 20m'] + dir: 'infra/blueprint-test/test' +- id: teardown + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && cleanup_environment'] + dir: 'infra/blueprint-test' +substitutions: + _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools-krm' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '1.23' +options: + env: + - 'TF_VAR_org_id=$_ORG_ID' + - 'TF_VAR_folder_id=$_FOLDER_ID' + - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' diff --git a/infra/blueprint-test/examples/krm_blueprints_with_test/examples/vpc-example-custom/README.md b/infra/blueprint-test/examples/krm_blueprints_with_test/examples/vpc-example-custom/README.md index c775a744fad..5d265195539 100644 --- a/infra/blueprint-test/examples/krm_blueprints_with_test/examples/vpc-example-custom/README.md +++ b/infra/blueprint-test/examples/krm_blueprints_with_test/examples/vpc-example-custom/README.md @@ -66,4 +66,3 @@ vpc.yaml compute.cnrm.cloud.google.com/v1beta1 ComputeNetwork netwo ``` kpt live status --output table --poll-until current ``` - diff --git a/infra/blueprint-test/examples/krm_blueprints_with_test/examples/vpc-example/README.md b/infra/blueprint-test/examples/krm_blueprints_with_test/examples/vpc-example/README.md index c775a744fad..5d265195539 100644 --- a/infra/blueprint-test/examples/krm_blueprints_with_test/examples/vpc-example/README.md +++ b/infra/blueprint-test/examples/krm_blueprints_with_test/examples/vpc-example/README.md @@ -66,4 +66,3 @@ vpc.yaml compute.cnrm.cloud.google.com/v1beta1 ComputeNetwork netwo ``` kpt live status --output table --poll-until current ``` - diff --git 
a/infra/blueprint-test/examples/policy-library/lib/constraints.rego b/infra/blueprint-test/examples/policy-library/lib/constraints.rego new file mode 100644 index 00000000000..8139e8219f5 --- /dev/null +++ b/infra/blueprint-test/examples/policy-library/lib/constraints.rego @@ -0,0 +1,36 @@ +# +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package validator.gcp.lib + +# Function to fetch the constraint spec +# Usage: +# get_constraint_params(constraint, params) + +get_constraint_params(constraint) = params { + params := constraint.spec.parameters +} + +# Function to fetch constraint info +# Usage: +# get_constraint_info(constraint, info) + +get_constraint_info(constraint) = info { + info := { + "name": constraint.metadata.name, + "kind": constraint.kind, + } +} diff --git a/infra/blueprint-test/examples/policy-library/lib/util.rego b/infra/blueprint-test/examples/policy-library/lib/util.rego new file mode 100644 index 00000000000..292cf94f832 --- /dev/null +++ b/infra/blueprint-test/examples/policy-library/lib/util.rego @@ -0,0 +1,46 @@ +# +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package validator.gcp.lib + +# has_field returns whether an object has a field +has_field(object, field) { + object[field] +} + +# False is a tricky special case, as false responses would create an undefined document unless +# they are explicitly tested for +has_field(object, field) { + object[field] == false +} + +has_field(object, field) = false { + not object[field] + not object[field] == false +} + +# get_default returns the value of an object's field or the provided default value. +# It avoids creating an undefined state when trying to access an object attribute that does +# not exist +get_default(object, field, _default) = output { + has_field(object, field) + output = object[field] +} + +get_default(object, field, _default) = output { + has_field(object, field) == false + output = _default +} diff --git a/infra/blueprint-test/examples/policy-library/lib/util_test.rego b/infra/blueprint-test/examples/policy-library/lib/util_test.rego new file mode 100644 index 00000000000..9e7eb00c1b1 --- /dev/null +++ b/infra/blueprint-test/examples/policy-library/lib/util_test.rego @@ -0,0 +1,51 @@ +# +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package validator.gcp.lib + +# has_field tests +test_has_field_exists { + obj := {"a": "b"} + true == has_field(obj, "a") +} + +# False is a tricky special case, as false responses would create an undefined document unless +# they are explicitly tested for +test_has_field_false { + obj := {"a": false} + true == has_field(obj, "a") +} + +test_has_field_no_field { + obj := {} + false == has_field(obj, "a") +} + +# get_default_tests +test_get_default_exists { + obj := {"a": "b"} + "b" == get_default(obj, "a", "q") +} + +test_get_default_not_exists { + obj := {} + "q" == get_default(obj, "a", "q") +} + +test_get_default_has_false { + obj := {"a": false} + false == get_default(obj, "a", "b") +} diff --git a/infra/blueprint-test/examples/policy-library/policies/constraints/serviceusage_allow_basic_apis.yaml b/infra/blueprint-test/examples/policy-library/policies/constraints/serviceusage_allow_basic_apis.yaml new file mode 100644 index 00000000000..a183143bc86 --- /dev/null +++ b/infra/blueprint-test/examples/policy-library/policies/constraints/serviceusage_allow_basic_apis.yaml @@ -0,0 +1,31 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: constraints.gatekeeper.sh/v1alpha1 +kind: GCPServiceUsageConstraintV1 +metadata: + name: allow_basic_set_of_apis + annotations: + description: Only a basic set of APIS + bundles.validator.forsetisecurity.org/healthcare-baseline-v1: security +spec: + severity: high + match: + target: # {"$ref":"#/definitions/io.k8s.cli.setters.target"} + - "organizations/**" + exclude: [] + parameters: + mode: allow + services: + - "cloudresourcemanager.googleapis.com" diff --git a/infra/blueprint-test/examples/policy-library/policies/templates/gcp_serviceusage_allowed_services_v1.yaml b/infra/blueprint-test/examples/policy-library/policies/templates/gcp_serviceusage_allowed_services_v1.yaml new file mode 100644 index 00000000000..5ff84ce3063 --- /dev/null +++ b/infra/blueprint-test/examples/policy-library/policies/templates/gcp_serviceusage_allowed_services_v1.yaml @@ -0,0 +1,99 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: templates.gatekeeper.sh/v1alpha1 +kind: ConstraintTemplate +metadata: + name: gcp-serviceusage-allowed-services-v1 +spec: + crd: + spec: + names: + kind: GCPServiceUsageConstraintV1 + validation: + openAPIV3Schema: + properties: + mode: + description: "Enforcement mode, defaults to allow" + type: string + enum: [allow, deny] + services: + description: "List of APIs to enforce on e.g compute.googleapis.com" + type: array + items: + type: string + targets: + validation.gcp.forsetisecurity.org: + rego: | + # + # Copyright 2021 Google LLC + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # https://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ # + + package templates.gcp.GCPServiceUsageConstraintV1 + + import data.validator.gcp.lib as lib + + deny[{ + "msg": message, + "details": metadata, + }] { + constraint := input.constraint + lib.get_constraint_params(constraint, params) + asset := input.asset + asset.asset_type == "serviceusage.googleapis.com/Service" + + service_usage := asset.resource.data + parent := service_usage.parent + state := service_usage.state + service := service_usage.name + + mode := lib.get_default(params, "mode", "allow") + + state == "ENABLED" + matches_found = [m | m = params.services[_]; m == service] + target_match_count(mode, desired_count) + count(matches_found) != desired_count + + message := sprintf("%v violates a service constraint", [asset.name]) + metadata := { + "resource": asset.name, + "mode": mode, + "service": service, + } + } + + ########################### + # Rule Utilities + ########################### + + target_match_count(mode) = 0 { + mode == "deny" + } + + target_match_count(mode) = 1 { + mode == "allow" + } + #ENDINLINE diff --git a/infra/blueprint-test/examples/retry_errors/main.tf b/infra/blueprint-test/examples/retry_errors/main.tf new file mode 100644 index 00000000000..f9b4301e584 --- /dev/null +++ b/infra/blueprint-test/examples/retry_errors/main.tf @@ -0,0 +1,24 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +resource "google_data_catalog_taxonomy" "basic_taxonomy" { + provider = google-beta + project = var.project_id + region = "us" + display_name = "basic_taxonomy" + description = "A collection of policy tags" + activated_policy_types = ["FINE_GRAINED_ACCESS_CONTROL"] +} diff --git a/infra/blueprint-test/examples/retry_errors/variables.tf b/infra/blueprint-test/examples/retry_errors/variables.tf new file mode 100644 index 00000000000..0127bdc98b3 --- /dev/null +++ b/infra/blueprint-test/examples/retry_errors/variables.tf @@ -0,0 +1,20 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + description = "Project to create Data Catalog taxonomy." 
+ type = string +} diff --git a/infra/blueprint-test/examples/simple_krm_blueprint/Kptfile b/infra/blueprint-test/examples/simple_krm_blueprint/Kptfile index 4b653fbb1b2..6122071789a 100644 --- a/infra/blueprint-test/examples/simple_krm_blueprint/Kptfile +++ b/infra/blueprint-test/examples/simple_krm_blueprint/Kptfile @@ -3,23 +3,9 @@ kind: Kptfile metadata: name: simple_krm_blueprint annotations: - blueprints.cloud.google.com/title: Virtual Private Cloud blueprint -upstream: - type: git - git: - repo: https://github.com/GoogleCloudPlatform/blueprints - directory: /catalog/networking/network/vpc - ref: main - updateStrategy: resource-merge -upstreamLock: - type: git - git: - repo: https://github.com/GoogleCloudPlatform/blueprints - directory: /catalog/networking/network/vpc - ref: main - commit: 1a24e4cdae0b16883657888b8165f282469dcd79 + config.kubernetes.io/local-config: "true" info: - description: A Virtual Private Cloud (VPC) + description: simple krm blueprint pipeline: mutators: - image: gcr.io/kpt-fn/apply-setters:v0.1 diff --git a/infra/blueprint-test/examples/simple_krm_blueprint/README.md b/infra/blueprint-test/examples/simple_krm_blueprint/README.md deleted file mode 100644 index c775a744fad..00000000000 --- a/infra/blueprint-test/examples/simple_krm_blueprint/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Virtual Private Cloud blueprint - -A Virtual Private Cloud (VPC) - -## Setters - -``` -Setter Usages -namespace 1 -network-name 1 -project-id 3 -``` - -## Sub-packages - -This package has no sub-packages. 
- -## Resources - -``` -File APIVersion Kind Name Namespace -services.yaml serviceusage.cnrm.cloud.google.com/v1beta1 Service project-id-compute projects -vpc.yaml compute.cnrm.cloud.google.com/v1beta1 ComputeNetwork network-name networking -``` - -## Resource References - -- [ComputeNetwork](https://cloud.google.com/config-connector/docs/reference/resource-docs/compute/computenetwork) -- [Service](https://cloud.google.com/config-connector/docs/reference/resource-docs/serviceusage/service) - -## Usage - -1. Clone the package: - ``` - kpt pkg get https://github.com/GoogleCloudPlatform/blueprints.git/catalog/networking/network/vpc@${VERSION} - ``` - Replace `${VERSION}` with the desired repo branch or tag - (for example, `main`). - -1. Move into the local package: - ``` - cd "./vpc/" - ``` - -1. Edit the function config file(s): - - setters.yaml - -1. Execute the function pipeline - ``` - kpt fn render - ``` - -1. Initialize the resource inventory - ``` - kpt live init --namespace ${NAMESPACE}" - ``` - Replace `${NAMESPACE}` with the namespace in which to manage - the inventory ResourceGroup (for example, `config-control`). - -1. Apply the package resources to your cluster - ``` - kpt live apply - ``` - -1. Wait for the resources to be ready - ``` - kpt live status --output table --poll-until current - ``` - diff --git a/infra/blueprint-test/examples/simple_krm_blueprint/pod.yaml b/infra/blueprint-test/examples/simple_krm_blueprint/pod.yaml new file mode 100644 index 00000000000..690853cc2ab --- /dev/null +++ b/infra/blueprint-test/examples/simple_krm_blueprint/pod.yaml @@ -0,0 +1,24 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Pod +metadata: + name: nginx # kpt-set: ${pod_name} +spec: + containers: + - name: nginx + image: nginx:latest + ports: + - containerPort: 80 diff --git a/infra/blueprint-test/examples/simple_krm_blueprint/setters.yaml b/infra/blueprint-test/examples/simple_krm_blueprint/setters.yaml index 5e3e61466c9..d6d15a18e82 100644 --- a/infra/blueprint-test/examples/simple_krm_blueprint/setters.yaml +++ b/infra/blueprint-test/examples/simple_krm_blueprint/setters.yaml @@ -1,4 +1,4 @@ -# Copyright 2021 Google LLC +# Copyright 2021-2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ apiVersion: v1 kind: ConfigMap metadata: # kpt-merge: /setters name: setters + annotations: + config.kubernetes.io/local-config: "true" data: - namespace: config-control - network-name: custom-network - project-id: project-id + pod_name: simple-krm-blueprint diff --git a/infra/blueprint-test/examples/simple_krm_blueprint/vpc.yaml b/infra/blueprint-test/examples/simple_krm_blueprint/vpc.yaml deleted file mode 100644 index 4e3484f362c..00000000000 --- a/infra/blueprint-test/examples/simple_krm_blueprint/vpc.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeNetwork -metadata: # kpt-merge: networking/network-name - name: network-name # kpt-set: ${network-name} - namespace: networking # kpt-set: ${namespace} - annotations: - cnrm.cloud.google.com/blueprint: cnrm/landing-zone:networking/v0.3.0 - cnrm.cloud.google.com/project-id: project-id # kpt-set: ${project-id} -spec: - autoCreateSubnetworks: false - deleteDefaultRoutesOnCreate: false - routingMode: GLOBAL diff --git a/infra/blueprint-test/examples/simple_pet_module/main.tf b/infra/blueprint-test/examples/simple_pet_module/main.tf new file mode 100644 index 00000000000..e1890023c1f --- /dev/null +++ b/infra/blueprint-test/examples/simple_pet_module/main.tf @@ -0,0 +1,14 @@ +variable "test" { + default = "" +} + +resource "random_pet" "hello" { +} + +output "test" { + value = var.test +} + +output "current_ws" { + value = terraform.workspace +} diff --git a/infra/blueprint-test/examples/simple_tf_module/main.tf b/infra/blueprint-test/examples/simple_tf_module/main.tf index 25ed15c5963..78e1378fd78 100644 --- a/infra/blueprint-test/examples/simple_tf_module/main.tf +++ b/infra/blueprint-test/examples/simple_tf_module/main.tf @@ -14,17 +14,13 @@ * limitations under the License. 
*/ -provider "google" { - version = "~> 3.45.0" -} - -provider "null" { - version = "~> 2.1" +terraform { + backend "local" {} } module "test-vpc-module" { source = "terraform-google-modules/network/google" - version = "~> 3.2.0" + version = "~> 7.0" project_id = var.project_id # Replace this with your project ID in quotes network_name = var.network_name mtu = 1460 @@ -50,6 +46,7 @@ module "test-vpc-module" { subnet_flow_logs_interval = "INTERVAL_10_MIN" subnet_flow_logs_sampling = 0.7 subnet_flow_logs_metadata = "INCLUDE_ALL_METADATA" + subnet_flow_logs_filter = "false" } ] } diff --git a/infra/blueprint-test/examples/simple_tf_module/versions.tf b/infra/blueprint-test/examples/simple_tf_module/versions.tf new file mode 100644 index 00000000000..28a17973531 --- /dev/null +++ b/infra/blueprint-test/examples/simple_tf_module/versions.tf @@ -0,0 +1,27 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +terraform { + required_version = ">= 0.13.0" + required_providers { + google = { + source = "hashicorp/google" + } + null = { + source = "hashicorp/null" + } + } +} diff --git a/infra/blueprint-test/examples/simple_tf_module_with_test/main.tf b/infra/blueprint-test/examples/simple_tf_module_with_test/main.tf index 62e14a6ce68..08676de1fd8 100644 --- a/infra/blueprint-test/examples/simple_tf_module_with_test/main.tf +++ b/infra/blueprint-test/examples/simple_tf_module_with_test/main.tf @@ -15,16 +15,16 @@ */ provider "google" { - version = "~> 3.45.0" + version = ">= 3.45.0" } provider "null" { - version = "~> 2.1" + version = ">= 2.1" } module "test-vpc-module" { source = "terraform-google-modules/network/google" - version = "~> 3.2.0" + version = "~> 7.0.0" project_id = var.project_id network_name = var.network_name mtu = 1460 diff --git a/infra/blueprint-test/examples/tf_vet/main.tf b/infra/blueprint-test/examples/tf_vet/main.tf new file mode 100644 index 00000000000..97e8cffbd58 --- /dev/null +++ b/infra/blueprint-test/examples/tf_vet/main.tf @@ -0,0 +1,22 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +resource "google_project_service" "project" { + project = var.project_id + service = var.service + disable_dependent_services = false + disable_on_destroy = false +} diff --git a/infra/blueprint-test/examples/tf_vet/variables.tf b/infra/blueprint-test/examples/tf_vet/variables.tf new file mode 100644 index 00000000000..aad6fb7457d --- /dev/null +++ b/infra/blueprint-test/examples/tf_vet/variables.tf @@ -0,0 +1,25 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + description = "Project to enable the service." + type = string +} + +variable "service" { + description = "Service to be enabled." 
+ type = string +} diff --git a/infra/blueprint-test/go.mod b/infra/blueprint-test/go.mod index bc90691a356..e4afbbbad6f 100644 --- a/infra/blueprint-test/go.mod +++ b/infra/blueprint-test/go.mod @@ -1,22 +1,140 @@ module github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test -go 1.16 +go 1.22.7 require ( - github.com/GoogleContainerTools/kpt-functions-catalog/functions/go/list-setters v0.1.0 - github.com/google/go-cmp v0.5.6 // indirect - github.com/gruntwork-io/terratest v0.35.6 - github.com/kr/text v0.2.0 // indirect - github.com/mitchellh/go-testing-interface v1.14.2-0.20210217184823-a52172cd2f64 - github.com/mitchellh/go-wordwrap v1.0.0 // indirect - github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect - github.com/otiai10/copy v1.6.0 - github.com/stretchr/testify v1.7.0 - github.com/tidwall/gjson v1.10.2 - golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect - golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 // indirect - golang.org/x/sys v0.0.0-20210603125802-9665404d3644 // indirect - golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect - sigs.k8s.io/kustomize/kyaml v0.11.0 + github.com/GoogleContainerTools/kpt-functions-sdk/go/api v0.0.0-20230427202446-3255accc518d + github.com/alexflint/go-filemutex v1.3.0 + github.com/gruntwork-io/terratest v0.48.1 + github.com/hashicorp/terraform-config-inspect v0.0.0-20241129133400-c404f8227ea6 + github.com/hashicorp/terraform-json v0.24.0 + github.com/mattn/go-shellwords v1.0.12 + github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770 + github.com/otiai10/copy v1.14.0 + github.com/stretchr/testify v1.10.0 + github.com/tidwall/gjson v1.18.0 + github.com/tidwall/sjson v1.2.5 + golang.org/x/mod v0.22.0 + golang.org/x/sync v0.10.0 + sigs.k8s.io/kustomize/kyaml v0.18.1 +) + +require ( + filippo.io/edwards25519 v1.1.0 // indirect + github.com/agext/levenshtein 
v1.2.3 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.5 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect + github.com/aws/aws-sdk-go-v2/config v1.28.5 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.46 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.20 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.41 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.24 // indirect + github.com/aws/aws-sdk-go-v2/service/acm v1.30.6 // indirect + github.com/aws/aws-sdk-go-v2/service/autoscaling v1.51.0 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.44.0 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.37.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.193.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.36.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ecs v1.52.0 // indirect + github.com/aws/aws-sdk-go-v2/service/iam v1.38.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.5 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.37.6 // indirect + github.com/aws/aws-sdk-go-v2/service/lambda v1.69.0 // indirect + github.com/aws/aws-sdk-go-v2/service/rds v1.91.0 // indirect + github.com/aws/aws-sdk-go-v2/service/route53 v1.46.2 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.69.0 // indirect 
+ github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sns v1.33.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sqs v1.37.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssm v1.56.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.1 // indirect + github.com/aws/smithy-go v1.22.1 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/boombuler/barcode v1.0.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/go-errors/errors v1.5.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-sql-driver/mysql v1.8.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gruntwork-io/go-commons v0.17.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-getter/v2 v2.2.3 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-safetemp v1.0.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f // indirect + github.com/hashicorp/hcl/v2 v2.22.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/jackc/pgpassfile v1.0.0 
// indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgx/v5 v5.7.1 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jinzhu/copier v0.4.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-zglob v0.0.4 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/pquerna/otp v1.4.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tmccombs/hcl2json v0.6.4 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect + github.com/urfave/cli/v2 v2.25.7 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect + github.com/zclconf/go-cty v1.15.1 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.8.0 // indirect + golang.org/x/tools v0.22.0 // indirect + google.golang.org/protobuf v1.35.1 // indirect + 
gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.28.4 // indirect + k8s.io/apimachinery v0.28.4 // indirect + k8s.io/client-go v0.28.4 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/infra/blueprint-test/go.sum b/infra/blueprint-test/go.sum index 74b910ddc4b..97168f08ec5 100644 --- a/infra/blueprint-test/go.sum +++ b/infra/blueprint-test/go.sum @@ -1,847 +1,351 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod 
h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v46.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.5/go.mod h1:foo3aIXRQ90zFve3r0QiDsrjGDUwWhKl0ZOQy1CT14k= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.1/go.mod h1:ea90/jvmnAwDrSooLH4sRIehEPtG/EPUXavDh31MnA4= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= 
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= -github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= 
-github.com/GoogleContainerTools/kpt-functions-catalog/functions/go/list-setters v0.1.0 h1:t+0ngfeqinOqizrKkYfXlqTHbUP4meGsuaZ1rnhlFlY= -github.com/GoogleContainerTools/kpt-functions-catalog/functions/go/list-setters v0.1.0/go.mod h1:ju1d4EAij/igLgN+0er7tPc1nTKn/PTjY00buPEsx0Q= -github.com/GoogleContainerTools/kpt-functions-sdk/go v0.0.0-20210810181223-632b30549de6 h1:oo4q344mHs4eg8puEsXdyikhwORcu2cLsKGsNFDqxqM= -github.com/GoogleContainerTools/kpt-functions-sdk/go v0.0.0-20210810181223-632b30549de6/go.mod h1:k86q33ABlA9TnUqRmHH9dnKY2Edh8YbxjRyPfjlM8jE= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.7/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/alecthomas/template 
v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v12 v12.0.0 h1:bNEQyAGak9tojivJNkoqWErVCQbjdL7GzRt3F8NvfJ0= -github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.38.28/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +filippo.io/edwards25519 
v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/GoogleContainerTools/kpt-functions-sdk/go/api v0.0.0-20230427202446-3255accc518d h1:NQFVnLXevDG7Ht9B/46X3FWHg+gEQc8Q68PlAnY0XsM= +github.com/GoogleContainerTools/kpt-functions-sdk/go/api v0.0.0-20230427202446-3255accc518d/go.mod h1:prNhhUAODrB2VqHVead9tB8nLU9ffY4e4jjBwLMNO1M= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/alexflint/go-filemutex v1.3.0 h1:LgE+nTUWnQCyRKbpoceKZsPQbs84LivvgwUymZXdOcM= +github.com/alexflint/go-filemutex v1.3.0/go.mod h1:U0+VA/i30mGBlLCrFPGtTe9y6wGQfNAWPBTekHQ+c8A= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-sdk-go-v2 v1.32.5 h1:U8vdWJuY7ruAkzaOdD7guwJjD06YSKmnKCJs7s3IkIo= +github.com/aws/aws-sdk-go-v2 v1.32.5/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= +github.com/aws/aws-sdk-go-v2/config v1.28.5 h1:Za41twdCXbuyyWv9LndXxZZv3QhTG1DinqlFsSuvtI0= +github.com/aws/aws-sdk-go-v2/config v1.28.5/go.mod h1:4VsPbHP8JdcdUDmbTVgNL/8w9SqOkM5jyY8ljIxLO3o= +github.com/aws/aws-sdk-go-v2/credentials v1.17.46 h1:AU7RcriIo2lXjUfHFnFKYsLCwgbz1E7Mm95ieIRDNUg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.46/go.mod 
h1:1FmYyLGL08KQXQ6mcTlifyFXfJVCNJTVGuQP4m0d/UA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.20 h1:sDSXIrlsFSFJtWKLQS4PUWRvrT580rrnuLydJrCQ/yA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.20/go.mod h1:WZ/c+w0ofps+/OUqMwWgnfrgzZH1DZO1RIkktICsqnY= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.41 h1:hqcxMc2g/MwwnRMod9n6Bd+t+9Nf7d5qRg7RaXKPd6o= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.41/go.mod h1:d1eH0VrttvPmrCraU68LOyNdu26zFxQFjrVSb5vdhog= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 h1:4usbeaes3yJnCFC7kfeyhkdkPtoRYPa/hTmCqMpKpLI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24/go.mod h1:5CI1JemjVwde8m2WG3cz23qHKPOxbpkq0HaoreEgLIY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 h1:N1zsICrQglfzaBnrfM0Ys00860C+QFwu6u/5+LomP+o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24/go.mod h1:dCn9HbJ8+K31i8IQ8EWmWj0EiIk0+vKiHNMxTTYveAg= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.24 h1:JX70yGKLj25+lMC5Yyh8wBtvB01GDilyRuJvXJ4piD0= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.24/go.mod h1:+Ln60j9SUTD0LEwnhEB0Xhg61DHqplBrbZpLgyjoEHg= +github.com/aws/aws-sdk-go-v2/service/acm v1.30.6 h1:fDg0RlN30Xf/yYzEUL/WXqhmgFsjVb/I3230oCfyI5w= +github.com/aws/aws-sdk-go-v2/service/acm v1.30.6/go.mod h1:zRR6jE3v/TcbfO8C2P+H0Z+kShiKKVaVyoIl8NQRjyg= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.51.0 h1:1KzQVZi7OTixxaVJ8fWaJAUBjme+iQ3zBOCZhE4RgxQ= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.51.0/go.mod h1:I1+/2m+IhnK5qEbhS3CrzjeiVloo9sItE/2K+so0fkU= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.44.0 h1:OREVd94+oXW5a+3SSUAo4K0L5ci8cucCLu+PSiek8OU= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.44.0/go.mod h1:Qbr4yfpNqVNl69l/GEDK+8wxLf/vHi0ChoiSDzD7thU= 
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.37.1 h1:vucMirlM6D+RDU8ncKaSZ/5dGrXNajozVwpmWNPn2gQ= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.37.1/go.mod h1:fceORfs010mNxZbQhfqUjUeHlTwANmIT4mvHamuUaUg= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.193.0 h1:RhSoBFT5/8tTmIseJUXM6INTXTQDF8+0oyxWBnozIms= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.193.0/go.mod h1:mzj8EEjIHSN2oZRXiw1Dd+uB4HZTl7hC8nBzX9IZMWw= +github.com/aws/aws-sdk-go-v2/service/ecr v1.36.6 h1:zg+3FGHA0PBs0KM25qE/rOf2o5zsjNa1g/Qq83+SDI0= +github.com/aws/aws-sdk-go-v2/service/ecr v1.36.6/go.mod h1:ZSq54Z9SIsOTf1Efwgw1msilSs4XVEfVQiP9nYVnKpM= +github.com/aws/aws-sdk-go-v2/service/ecs v1.52.0 h1:7/vgFWplkusJN/m+3QOa+W9FNRqa8ujMPNmdufRaJpg= +github.com/aws/aws-sdk-go-v2/service/ecs v1.52.0/go.mod h1:dPTOvmjJQ1T7Q+2+Xs2KSPrMvx+p0rpyV+HsQVnUK4o= +github.com/aws/aws-sdk-go-v2/service/iam v1.38.1 h1:hfkzDZHBp9jAT4zcd5mtqckpU4E3Ax0LQaEWWk1VgN8= +github.com/aws/aws-sdk-go-v2/service/iam v1.38.1/go.mod h1:u36ahDtZcQHGmVm/r+0L1sfKX4fzLEMdCqiKRKkUMVM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.5 h1:gvZOjQKPxFXy1ft3QnEyXmT+IqneM9QAUWlM3r0mfqw= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.5/go.mod h1:DLWnfvIcm9IET/mmjdxeXbBKmTCm0ZB8p1za9BVteM8= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.5 h1:3Y457U2eGukmjYjeHG6kanZpDzJADa2m0ADqnuePYVQ= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.5/go.mod h1:CfwEHGkTjYZpkQ/5PvcbEtT7AJlG68KkEvmtwU8z3/U= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.5 h1:wtpJ4zcwrSbwhECWQoI/g6WM9zqCcSpHDJIWSbMLOu4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.5/go.mod h1:qu/W9HXQbbQ4+1+JcZp0ZNPV31ym537ZJN+fiS7Ti8E= 
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.5 h1:P1doBzv5VEg1ONxnJss1Kh5ZG/ewoIE4MQtKKc6Crgg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.5/go.mod h1:NOP+euMW7W3Ukt28tAxPuoWao4rhhqJD3QEBk7oCg7w= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.6 h1:CZImQdb1QbU9sGgJ9IswhVkxAcjkkD1eQTMA1KHWk+E= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.6/go.mod h1:YJDdlK0zsyxVBxGU48AR/Mi8DMrGdc1E3Yij4fNrONA= +github.com/aws/aws-sdk-go-v2/service/lambda v1.69.0 h1:BXt75frE/FYtAmEDBJRBa2HexOw+oAZWZl6QknZEFgg= +github.com/aws/aws-sdk-go-v2/service/lambda v1.69.0/go.mod h1:guz2K3x4FKSdDaoeB+TPVgJNU9oj2gftbp5cR8ela1A= +github.com/aws/aws-sdk-go-v2/service/rds v1.91.0 h1:eqHz3Uih+gb0vLE5Cc4Xf733vOxsxDp6GFUUVQU4d7w= +github.com/aws/aws-sdk-go-v2/service/rds v1.91.0/go.mod h1:h2jc7IleH3xHY7y+h8FH7WAZcz3IVLOB6/jXotIQ/qU= +github.com/aws/aws-sdk-go-v2/service/route53 v1.46.2 h1:wmt05tPp/CaRZpPV5B4SaJ5TwkHKom07/BzHoLdkY1o= +github.com/aws/aws-sdk-go-v2/service/route53 v1.46.2/go.mod h1:d+K9HESMpGb1EU9/UmmpInbGIUcAkwmcY6ZO/A3zZsw= +github.com/aws/aws-sdk-go-v2/service/s3 v1.69.0 h1:Q2ax8S21clKOnHhhr933xm3JxdJebql+R7aNo7p7GBQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.69.0/go.mod h1:ralv4XawHjEMaHOWnTFushl0WRqim/gQWesAMF6hTow= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.6 h1:1KDMKvOKNrpD667ORbZ/+4OgvUoaok1gg/MLzrHF9fw= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.6/go.mod h1:DmtyfCfONhOyVAJ6ZMTrDSFIeyCBlEO93Qkfhxwbxu0= +github.com/aws/aws-sdk-go-v2/service/sns v1.33.6 h1:lEUtRHICiXsd7VRwRjXaY7MApT2X4Ue0Mrwe6XbyBro= +github.com/aws/aws-sdk-go-v2/service/sns v1.33.6/go.mod h1:SODr0Lu3lFdT0SGsGX1TzFTapwveBrT5wztVoYtppm8= +github.com/aws/aws-sdk-go-v2/service/sqs v1.37.1 h1:39WvSrVq9DD6UHkD+fx5x19P5KpRQfNdtgReDVNbelc= +github.com/aws/aws-sdk-go-v2/service/sqs v1.37.1/go.mod h1:3gwPzC9LER/BTQdQZ3r6dUktb1rSjABF1D3Sr6nS7VU= +github.com/aws/aws-sdk-go-v2/service/ssm v1.56.0 h1:mADKqoZaodipGgiZfuAjtlcr4IVBtXPZKVjkzUZCCYM= 
+github.com/aws/aws-sdk-go-v2/service/ssm v1.56.0/go.mod h1:l9qF25TzH95FhcIak6e4vt79KE4I7M2Nf59eMUVjj6c= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.6 h1:3zu537oLmsPfDMyjnUS2g+F2vITgy5pB74tHI+JBNoM= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.6/go.mod h1:WJSZH2ZvepM6t6jwu4w/Z45Eoi75lPN7DcydSRtJg6Y= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.5 h1:K0OQAsDywb0ltlFrZm0JHPY3yZp/S9OaoLU33S7vPS8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.5/go.mod h1:ORITg+fyuMoeiQFiVGoqB3OydVTLkClw/ljbblMq6Cc= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.1 h1:6SZUVRQNvExYlMLbHdlKB48x0fLbc2iVROyaNEwBHbU= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.1/go.mod h1:GqWyYCwLXnlUB1lOAXQyNSPqPLQJvmo8J0DWBzp9mtg= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= 
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs= +github.com/boombuler/barcode v1.0.1/go.mod 
h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= +github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v0.0.0-20200109221225-a4f60165b7a3/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units 
v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU= -github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod 
h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod 
h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag 
v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock 
v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v0.0.0-20181025225059-d3de96c4c28e/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-containerregistry v0.0.0-20200110202235-f4fb41bf00a3/go.mod h1:2wIuQute9+hhWqvL3vEI7YB0EKluF4WcPzI1eAliazk= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/go-errors/errors v1.5.0 h1:/EuijeGOu7ckFxzhkj4CXJ8JaenxK7bKUxpPYqeLHqQ= +github.com/go-errors/errors v1.5.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference 
v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= +github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gorilla/mux v0.0.0-20181024020800-521ea7b17d02/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod 
h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78= -github.com/gruntwork-io/terratest v0.35.6 h1:Q7pUd3JI4i5mmR/KgYkZJJ4q9ZbV8ru9KydwjA/ohaA= -github.com/gruntwork-io/terratest v0.35.6/go.mod h1:GIVJGBV1WIv1vxIG31Ycy0CuHYfXuvvkilNQuC9Wi+o= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gruntwork-io/go-commons v0.17.1 h1:2KS9wAqrgeOTWj33DSHzDNJ1FCprptWdLFqej+wB8x0= +github.com/gruntwork-io/go-commons 
v0.17.1/go.mod h1:S98JcR7irPD1bcruSvnqupg+WSJEJ6xaM89fpUZVISk= +github.com/gruntwork-io/terratest v0.48.1 h1:pnydDjkWbZCUYXvQkr24y21fBo8PfJC5hRGdwbl1eXM= +github.com/gruntwork-io/terratest v0.48.1/go.mod h1:U2EQW4Odlz75XJUH16Kqkr9c93p+ZZtkpVez7GkZFa4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY= -github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= -github.com/hashicorp/terraform-json v0.9.0 h1:WE7+Wt93W93feOiCligElSyS0tlDzwZUtJuDGIBr8zg= -github.com/hashicorp/terraform-json v0.9.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a 
h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o= -github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter/v2 v2.2.3 h1:6CVzhT0KJQHqd9b0pK3xSP0CM/Cv+bVhk+jcaRJ2pGk= +github.com/hashicorp/go-getter/v2 v2.2.3/go.mod h1:hp5Yy0GMQvwWVUmwLs3ygivz1JSLI323hdIE9J9m7TY= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/hcl/v2 v2.22.0 h1:hkZ3nCtqeJsDhPRFz5EA9iwcG1hNWGePOTw6oyul12M= +github.com/hashicorp/hcl/v2 v2.22.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/terraform-config-inspect v0.0.0-20241129133400-c404f8227ea6 h1:146llE+6P/9YO8RcHRehzGNiS9+OoirKW9/aML6/JIA= +github.com/hashicorp/terraform-config-inspect 
v0.0.0-20241129133400-c404f8227ea6/go.mod h1:Gz/z9Hbn+4KSp8A2FBtNszfLSdT2Tn/uAKGuVqqWmDI= +github.com/hashicorp/terraform-json v0.24.0 h1:rUiyF+x1kYawXeRth6fKFm/MdfBS6+lW4NbeATsYz8Q= +github.com/hashicorp/terraform-json v0.24.0/go.mod h1:Nfj5ubo9xbu9uiAoZVBsNOjvNKB66Oyrvtit74kC7ow= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= +github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= 
-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 
h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= -github.com/mailru/easyjson 
v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-runewidth v0.0.0-20181025052659-b20a3daf6a39/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= -github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-zglob v0.0.4 h1:LQi2iOm0/fGgu80AioIJ/1j9w9Oh+9DZ39J4VAGzHQM= +github.com/mattn/go-zglob v0.0.4/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface 
v1.14.2-0.20210217184823-a52172cd2f64 h1:+9bM6qWXndPx7+czi9+Jj6zHPioFpfdhwVGOYOgujMY= -github.com/mitchellh/go-testing-interface v1.14.2-0.20210217184823-a52172cd2f64/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770 h1:drhDO54gdT/a15GBcMRmunZiNcLgPiFIJa23KzmcvcU= +github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770/go.mod h1:SO/iHr6q2EzbqRApt+8/E9wqebTwQn5y+UlB04bxzo0= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/open-policy-agent/frameworks/constraint v0.0.0-20200420221412-5bae2037a343/go.mod h1:Dr3QxvH+NTQcPPZWSt1ueNOsxW4VwgUltaLL7Ttnrac= -github.com/open-policy-agent/gatekeeper v3.0.4-beta.2+incompatible/go.mod h1:gWd63apzboCsahWE6btvIzIcXcNxT+lJvUAaluqVu/E= -github.com/open-policy-agent/opa v0.19.1/go.mod h1:rrwxoT/b011T0cyj+gg2VvxqTtn6N3gp/jzmr3fjW44= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/oracle/oci-go-sdk v7.1.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= -github.com/otiai10/copy v1.6.0 h1:IinKAryFFuPONZ7cm6T6E2QX/vcJwSnlaA5lfoaXIiQ= -github.com/otiai10/copy v1.6.0/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/otiai10/mint v1.3.2 h1:VYWnrP5fXmz1MXvjuUvcBrXSjGE6xjON+axB/UrpO3E= -github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= -github.com/pkg/errors v0.0.0-20181023235946-059132a15dd0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= +github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= +github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= -github.com/prometheus/client_golang v0.0.0-20181025174421-f30f42803563/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod 
h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod 
h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.0-20181021141114-fe5e611709b0/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v0.0.0-20181024212040-082b515c9490/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/otp v1.4.0 h1:wZvl1TIVxKRThZIBiwOOHOGP/1+nZyWBil9Y2XNEDzg= +github.com/pquerna/otp v1.4.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= -github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vdemeester/k8s-pkg-credentialprovider v0.0.0-20200107171650-7c61ffa44238/go.mod h1:JwQJCMWpUDqjZrB5jpw0f5VbN7U95zxFy1ZDpoEarGo= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vmihailenco/msgpack 
v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b/go.mod h1:HptNXiXVDcJjXe9SqMd0v2FsL9f8dz4GnXgltU6q/co= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tmccombs/hcl2json v0.6.4 h1:/FWnzS9JCuyZ4MNwrG4vMrFrzRgsWEOVi+1AyYUVLGw= +github.com/tmccombs/hcl2json v0.6.4/go.mod h1:+ppKlIW3H5nsAsZddXPy2iMyvld3SHxyjswOZhavRDk= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 
h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= -github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= -go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap 
v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/zclconf/go-cty v1.15.1 h1:RgQYm4j2EvoBRXOPxhUvxPzRrGDo1eCOhHXuGfrj5S0= +github.com/zclconf/go-cty v1.15.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181023182221-1baf3a9d7d67/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod 
h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191205215504-7b8c8591a921/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20201110201400-7099162a900a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= 
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto 
v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= -k8s.io/api v0.17.0/go.mod 
h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= -k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= -k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= -k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs= -k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= -k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= -k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= -k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= -k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= -k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= -k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= -k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= -k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= -k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= -k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc= -k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM= -k8s.io/cloud-provider v0.17.0/go.mod h1:Ze4c3w2C0bRsjkBUoHpFi+qWe3ob1wI2/7cUn+YQIDE= -k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= -k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= -k8s.io/code-generator v0.17.2/go.mod 
h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= -k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= -k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= -k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/csi-translation-lib v0.17.0/go.mod h1:HEF7MEz7pOLJCnxabi45IPkhSsE/KmxPQksuCrHKWls= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/legacy-cloud-providers v0.17.0/go.mod h1:DdzaepJ3RtRy+e5YhNtrCYwlgyK87j/5+Yfp0L9Syp8= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils 
v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= -sigs.k8s.io/kustomize/kyaml v0.11.0 h1:9KhiCPKaVyuPcgOLJXkvytOvjMJLoxpjodiycb4gHsA= -sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= -sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= +k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= +k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= +k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= +k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= +k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= +sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/infra/blueprint-test/pkg/benchmark/krm.go b/infra/blueprint-test/pkg/benchmark/krm.go index f9400357043..3a669cd65e9 100644 --- a/infra/blueprint-test/pkg/benchmark/krm.go +++ 
b/infra/blueprint-test/pkg/benchmark/krm.go @@ -53,7 +53,9 @@ func CreateVariant(b testing.TB, baseDir string, buildDir string, variantName st if err != nil { b.Fatalf("unable to read resources in %s :%v", variantPath, err) } - kpt.UpsertSetters(rs, setters) + if err := kpt.UpsertSetters(rs, setters); err != nil { + b.Fatalf("unable to upsert setters in %s :%v", variantPath, err) + } err = kpt.WritePkgResources(variantPath, rs) if err != nil { b.Fatalf("unable to write resources in %s :%v", variantPath, err) diff --git a/infra/blueprint-test/pkg/bq/bq.go b/infra/blueprint-test/pkg/bq/bq.go new file mode 100644 index 00000000000..d7c24c505fc --- /dev/null +++ b/infra/blueprint-test/pkg/bq/bq.go @@ -0,0 +1,150 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Package bq provides a set of helpers to interact with bq tool (part of CloudSDK) +package bq + +import ( + "os" + "strings" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" + "github.com/gruntwork-io/terratest/modules/logger" + "github.com/gruntwork-io/terratest/modules/shell" + "github.com/mitchellh/go-testing-interface" + "github.com/tidwall/gjson" +) + +type CmdCfg struct { + bqBinary string // path to bq binary + commonArgs []string // common arguments to pass to bq calls + logger *logger.Logger // custom logger +} + +type cmdOption func(*CmdCfg) + +func WithBinary(bqBinary string) cmdOption { + return func(f *CmdCfg) { + f.bqBinary = bqBinary + } +} + +func WithCommonArgs(commonArgs []string) cmdOption { + return func(f *CmdCfg) { + f.commonArgs = commonArgs + } +} + +func WithLogger(logger *logger.Logger) cmdOption { + return func(f *CmdCfg) { + f.logger = logger + } +} + +// newCmdConfig sets defaults and validates values for bq Options. +func newCmdConfig(opts ...cmdOption) (*CmdCfg, error) { + gOpts := &CmdCfg{} + // apply options + for _, opt := range opts { + opt(gOpts) + } + if gOpts.bqBinary == "" { + err := utils.BinaryInPath("bq") + if err != nil { + return nil, err + } + gOpts.bqBinary = "bq" + } + if gOpts.commonArgs == nil { + gOpts.commonArgs = []string{"--format", "json"} + } + if gOpts.logger == nil { + gOpts.logger = utils.GetLoggerFromT() + } + return gOpts, nil +} + +// initBq checks for a local .bigqueryrc file and creates an empty one if not to avoid forced bigquery initialization, which doesn't output valid json. 
+func initBq(t testing.TB) { + homeDir, err := os.UserHomeDir() + if err != nil { + t.Fatal(err) + } + fileName := homeDir + "/.bigqueryrc" + _ , err = os.Stat(fileName) + if err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } + file, err := os.Create(fileName) + if err != nil { + t.Fatal(err) + } + file.Close() +} + +// RunCmd executes a bq command and fails test if there are any errors. +func RunCmd(t testing.TB, cmd string, opts ...cmdOption) string { + op, err := RunCmdE(t, cmd, opts...) + if err != nil { + t.Fatal(err) + } + return op +} + +// RunCmdE executes a bq command and return output. +func RunCmdE(t testing.TB, cmd string, opts ...cmdOption) (string, error) { + gOpts, err := newCmdConfig(opts...) + if err != nil { + t.Fatal(err) + } + initBq(t) + // split command into args + args := strings.Fields(cmd) + bqCmd := shell.Command{ + Command: "bq", + Args: append(gOpts.commonArgs, args...), + Logger: gOpts.logger, + } + return shell.RunCommandAndGetStdOutE(t, bqCmd) +} + +// Run executes a bq command and returns value as gjson.Result. +// It fails the test if there are any errors executing the bq command or parsing the output value. +func Run(t testing.TB, cmd string, opts ...cmdOption) gjson.Result { + op := RunCmd(t, cmd, opts...) + if !gjson.Valid(op) { + t.Fatalf("Error parsing output, invalid json: %s", op) + } + return gjson.Parse(op) +} + +// RunWithCmdOptsf executes a bq command and returns value as gjson.Result. +// +// RunWithCmdOptsf(t, ops.., "ls --datasets --project_id=%s", "projectId") +// +// It fails the test if there are any errors executing the bq command or parsing the output value. +func RunWithCmdOptsf(t testing.TB, opts []cmdOption, cmd string, args ...interface{}) gjson.Result { + return Run(t, utils.StringFromTextAndArgs(append([]interface{}{cmd}, args...)...), opts...) +} + +// Runf executes a bq command and returns value as gjson.Result. 
+// +// Runf(t, "ls --datasets --project_id=%s", "projectId") +// +// It fails the test if there are any errors executing the bq command or parsing the output value. +func Runf(t testing.TB, cmd string, args ...interface{}) gjson.Result { + return Run(t, utils.StringFromTextAndArgs(append([]interface{}{cmd}, args...)...)) +} diff --git a/infra/blueprint-test/pkg/bq/bq_test.go b/infra/blueprint-test/pkg/bq/bq_test.go new file mode 100644 index 00000000000..a46f4bf8528 --- /dev/null +++ b/infra/blueprint-test/pkg/bq/bq_test.go @@ -0,0 +1,51 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Package bq provides a set of helpers to interact with bq tool (part of CloudSDK) +package bq + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRunf(t *testing.T) { + tests := []struct { + name string + cmd string + projectIdEnvVar string + }{ + { + name: "Runf", + cmd: "query --nouse_legacy_sql 'select * FROM %s.samples.INFORMATION_SCHEMA.TABLES limit 1;'", + projectIdEnvVar: "bigquery-public-data", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if projectName, present := os.LookupEnv(tt.projectIdEnvVar); present { + op := Runf(t, tt.cmd, projectName) + assert := assert.New(t) + assert.Contains(op.Array()[0], "creation_time") + } else { + t.Logf("Skipping test, %s envvar not set", tt.projectIdEnvVar) + t.Skip() + } + }) + } +} diff --git a/infra/blueprint-test/pkg/cai/cai.go b/infra/blueprint-test/pkg/cai/cai.go new file mode 100644 index 00000000000..58474edf721 --- /dev/null +++ b/infra/blueprint-test/pkg/cai/cai.go @@ -0,0 +1,82 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package cai provides a set of helpers to interact with Cloud Asset Inventory +package cai + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/tidwall/gjson" +) + +type CmdCfg struct { + sleep int // minutes to sleep prior to CAI retreval. 
default: 2 + assetTypes []string // asset types to retrieve. empty: all + args []string // arguments to pass to call +} + +type cmdOption func(*CmdCfg) + +// newCmdConfig sets defaults and options +func newCmdConfig(opts ...cmdOption) (*CmdCfg) { + caiOpts := &CmdCfg{ + sleep: 2, + assetTypes: nil, + args: nil, + } + + for _, opt := range opts { + opt(caiOpts) + } + + if caiOpts.assetTypes != nil { + caiOpts.args = []string{"--asset-types", strings.Join(caiOpts.assetTypes, ",")} + } + caiOpts.args = append(caiOpts.args, "--content-type", "resource") + + return caiOpts +} + +// Set custom sleep minutes +func WithSleep(sleep int) cmdOption { + return func(f *CmdCfg) { + f.sleep = sleep + } +} + +// Set asset types +func WithAssetTypes(assetTypes []string) cmdOption { + return func(f *CmdCfg) { + f.assetTypes = assetTypes + } +} + +// GetProjectResources returns the cloud asset inventory resources for a project as a gjson.Result +func GetProjectResources(t testing.TB, project string, opts ...cmdOption) gjson.Result { + caiOpts := newCmdConfig(opts...) + + // Cloud Asset Inventory offers best-effort data freshness. 
+ t.Logf("Sleeping for %d minutes before retrieving Cloud Asset Inventory...", caiOpts.sleep) + time.Sleep(time.Duration(caiOpts.sleep) * time.Minute) + + cmd := fmt.Sprintf("asset list --project %s", project) + return gcloud.Runf(t, strings.Join(append([]string{cmd}, caiOpts.args...), " ")) +} diff --git a/infra/blueprint-test/pkg/discovery/config.go b/infra/blueprint-test/pkg/discovery/config.go index dd7ab873d76..73229a6b8e6 100644 --- a/infra/blueprint-test/pkg/discovery/config.go +++ b/infra/blueprint-test/pkg/discovery/config.go @@ -49,11 +49,11 @@ func GetTestConfig(path string) (BlueprintTestConfig, error) { // isValidTestConfig validates a given BlueprintTestConfig func isValidTestConfig(b BlueprintTestConfig) error { - if b.APIVersion != blueprintTestAPIVersion { - return fmt.Errorf("invalid APIVersion %s expected %s", b.APIVersion, blueprintTestAPIVersion) + if b.ResourceMeta.APIVersion != blueprintTestAPIVersion { + return fmt.Errorf("invalid APIVersion %s expected %s", b.ResourceMeta.APIVersion, blueprintTestAPIVersion) } - if b.Kind != blueprintTestKind { - return fmt.Errorf("invalid Kind %s expected %s", b.Kind, blueprintTestKind) + if b.ResourceMeta.Kind != blueprintTestKind { + return fmt.Errorf("invalid Kind %s expected %s", b.ResourceMeta.Kind, blueprintTestKind) } return nil } diff --git a/infra/blueprint-test/pkg/discovery/config_test.go b/infra/blueprint-test/pkg/discovery/config_test.go index b7301439c0d..2ff73e42f2d 100644 --- a/infra/blueprint-test/pkg/discovery/config_test.go +++ b/infra/blueprint-test/pkg/discovery/config_test.go @@ -1,7 +1,6 @@ package discovery import ( - "io/ioutil" "os" "path" "testing" @@ -66,10 +65,10 @@ spec: func setupTestCfg(t *testing.T, data string) string { t.Helper() assert := assert.New(t) - baseDir, err := ioutil.TempDir("", "") + baseDir, err := os.MkdirTemp("", "") assert.NoError(err) fPath := path.Join(baseDir, "test.yaml") - err = ioutil.WriteFile(fPath, []byte(data), 0644) + err = os.WriteFile(fPath, 
[]byte(data), 0644) assert.NoError(err) return fPath } diff --git a/infra/blueprint-test/pkg/discovery/discover.go b/infra/blueprint-test/pkg/discovery/discover.go index 3989f301912..bb6995c0f0e 100644 --- a/infra/blueprint-test/pkg/discovery/discover.go +++ b/infra/blueprint-test/pkg/discovery/discover.go @@ -19,7 +19,6 @@ package discovery import ( "fmt" - "io/ioutil" "os" "path" @@ -59,15 +58,15 @@ func FindTestConfigs(t testing.TB, intTestDir string) map[string]string { fixturesBase := path.Join(testBase, "../", FixtureDir) explicitTests, err := findDirs(testBase) if err != nil { - t.Logf("Error discovering explicit tests: %v", err) + t.Logf("Skipping explicit tests discovery: %v", err) } fixtures, err := findDirs(fixturesBase) if err != nil { - t.Logf("Error discovering fixtures: %v", err) + t.Logf("Skipping fixtures discovery: %v", err) } examples, err := findDirs(examplesBase) if err != nil { - t.Logf("Error discovering examples: %v", err) + t.Logf("Skipping examples discovery: %v", err) } testCases := make(map[string]string) @@ -105,13 +104,13 @@ func GetKnownDirInParents(dir string, max int) (string, error) { if !os.IsNotExist(err) { return dirInParent, err } - return GetKnownDirInParents(path.Join("..", dir), max-1) + return GetKnownDirInParents(dirInParent, max-1) } // findDirs returns a map of directories in path func findDirs(path string) (map[string]bool, error) { dirs := make(map[string]bool) - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return dirs, err } diff --git a/infra/blueprint-test/pkg/gcloud/gcloud.go b/infra/blueprint-test/pkg/gcloud/gcloud.go index 8d5803b8208..29118384892 100644 --- a/infra/blueprint-test/pkg/gcloud/gcloud.go +++ b/infra/blueprint-test/pkg/gcloud/gcloud.go @@ -1,5 +1,5 @@ /** - * Copyright 2021 Google LLC + * Copyright 2021-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,11 +18,13 @@ package gcloud import ( + "fmt" "strings" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" "github.com/gruntwork-io/terratest/modules/logger" "github.com/gruntwork-io/terratest/modules/shell" + "github.com/mattn/go-shellwords" "github.com/mitchellh/go-testing-interface" "github.com/tidwall/gjson" ) @@ -78,22 +80,30 @@ func newCmdConfig(opts ...cmdOption) (*CmdCfg, error) { // RunCmd executes a gcloud command and fails test if there are any errors. func RunCmd(t testing.TB, cmd string, opts ...cmdOption) string { + op, err := RunCmdE(t, cmd, opts...) + if err != nil { + t.Fatal(err) + } + return op +} + +// RunCmdE executes a gcloud command and return output. +func RunCmdE(t testing.TB, cmd string, opts ...cmdOption) (string, error) { gOpts, err := newCmdConfig(opts...) if err != nil { t.Fatal(err) } // split command into args - args := strings.Fields(cmd) + args, err := shellwords.Parse(cmd) + if err != nil { + t.Fatal(err) + } gcloudCmd := shell.Command{ Command: "gcloud", Args: append(args, gOpts.commonArgs...), Logger: gOpts.logger, } - op, err := shell.RunCommandAndGetStdOutE(t, gcloudCmd) - if err != nil { - t.Fatal(err) - } - return op + return shell.RunCommandAndGetStdOutE(t, gcloudCmd) } // Run executes a gcloud command and returns value as gjson.Result. 
@@ -105,3 +115,49 @@ func Run(t testing.TB, cmd string, opts ...cmdOption) gjson.Result { } return gjson.Parse(op) } + +// TFVet executes gcloud beta terraform vet +func TFVet(t testing.TB, planFilePath string, policyLibraryPath, terraformVetProject string) gjson.Result { + op, err := RunCmdE(t, fmt.Sprintf("beta terraform vet %s --policy-library=%s --project=%s", planFilePath, policyLibraryPath, terraformVetProject)) + if err != nil && !(strings.Contains(err.Error(), "Validating resources") && strings.Contains(err.Error(), "done")) { + t.Fatal(err) + } + if !gjson.Valid(op) { + t.Fatalf("Error parsing output, invalid json: %s", op) + } + return gjson.Parse(op) +} + +// RunWithCmdOptsf executes a gcloud command and returns value as gjson.Result. +// +// RunWithCmdOptsf(t, ops.., "projects list --filter=%s", "projectId") +// +// It fails the test if there are any errors executing the gcloud command or parsing the output value. +func RunWithCmdOptsf(t testing.TB, opts []cmdOption, cmd string, args ...interface{}) gjson.Result { + return Run(t, utils.StringFromTextAndArgs(append([]interface{}{cmd}, args...)...), opts...) +} + +// Runf executes a gcloud command and returns value as gjson.Result. +// +// Runf(t, "projects list --filter=%s", "projectId") +// +// It fails the test if there are any errors executing the gcloud command or parsing the output value. +func Runf(t testing.TB, cmd string, args ...interface{}) gjson.Result { + return Run(t, utils.StringFromTextAndArgs(append([]interface{}{cmd}, args...)...)) +} + +// ActivateCredsAndEnvVars activates credentials and exports auth related envvars. 
+func ActivateCredsAndEnvVars(t testing.TB, creds string) { + credsPath, err := utils.WriteTmpFile(creds) + if err != nil { + t.Fatal(err) + } + RunCmd(t, "auth activate-service-account", WithCommonArgs([]string{"--key-file", credsPath})) + // set auth related env vars + // TF provider auth + utils.SetEnv(t, "GOOGLE_CREDENTIALS", creds) + // gcloud SDK override + utils.SetEnv(t, "CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE", credsPath) + // ADC + utils.SetEnv(t, "GOOGLE_APPLICATION_CREDENTIALS", credsPath) +} diff --git a/infra/blueprint-test/pkg/gcloud/gcloud_test.go b/infra/blueprint-test/pkg/gcloud/gcloud_test.go new file mode 100644 index 00000000000..d6ecf0e3b82 --- /dev/null +++ b/infra/blueprint-test/pkg/gcloud/gcloud_test.go @@ -0,0 +1,102 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Package gcloud provides a set of helpers to interact with gcloud(Cloud SDK) binary +package gcloud + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestActivateCredsAndEnvVars(t *testing.T) { + tests := []struct { + name string + keyEnvVar string + user string + }{ + { + name: "with sa key", + keyEnvVar: "TEST_KEY", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + creds, present := os.LookupEnv(tt.keyEnvVar) + if !present { + t.Logf("Skipping test, %s envvar not set", tt.keyEnvVar) + t.Skip() + } + ActivateCredsAndEnvVars(t, creds) + assert := assert.New(t) + assert.Equal(os.Getenv("GOOGLE_CREDENTIALS"), creds) + pathEnvVars := []string{"CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE", "GOOGLE_APPLICATION_CREDENTIALS"} + for _, v := range pathEnvVars { + c, err := os.ReadFile(os.Getenv(v)) + assert.NoError(err) + assert.Equal(string(c), creds) + } + + }) + } +} + +func TestRunf(t *testing.T) { + tests := []struct { + name string + cmd string + projectIdEnvVar string + }{ + { + name: "Runf", + cmd: "projects list --filter=%s", + projectIdEnvVar: "TEST_PROJECT_ID", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if projectName, present := os.LookupEnv(tt.projectIdEnvVar); present { + op := Runf(t, tt.cmd, projectName) + assert := assert.New(t) + assert.Equal(projectName, op.Array()[0].Get("projectId").String()) + } else { + t.Logf("Skipping test, %s envvar not set", tt.projectIdEnvVar) + t.Skip() + } + }) + } +} + +func TestRun(t *testing.T) { + tests := []struct { + name string + cmd string + }{ + { + name: "Run with quotes", + cmd: "organizations list --filter=\"DISPLAY_NAME!=google.com AND lifecycleState=ACTIVE\"", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + op := Runf(t, tt.cmd) + assert := assert.New(t) + assert.NotEmpty(op.String()) + }) + } +} diff --git a/infra/blueprint-test/pkg/golden/golden.go 
b/infra/blueprint-test/pkg/golden/golden.go new file mode 100644 index 00000000000..0e47f3116f4 --- /dev/null +++ b/infra/blueprint-test/pkg/golden/golden.go @@ -0,0 +1,178 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless assertd by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package golden helps manage goldenfiles. +package golden + +import ( + "fmt" + "os" + "path" + "strings" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" + "github.com/mitchellh/go-testing-interface" + "github.com/stretchr/testify/assert" + "github.com/tidwall/gjson" + "golang.org/x/sync/errgroup" +) + +const ( + gfDir = "testdata" + gfPerms = 0755 + gfUpdateEnvVar = "UPDATE_GOLDEN" + gGoroutinesMax = 24 +) + +type GoldenFile struct { + dir string + fileName string + sanitizers []Sanitizer + t testing.TB +} + +type Sanitizer func(string) string + +// StringSanitizer replaces all occurrences of old string with new string +func StringSanitizer(old, new string) Sanitizer { + return func(s string) string { + return strings.ReplaceAll(s, old, new) + } +} + +// ProjectIDSanitizer replaces all occurrences of current gcloud project ID with PROJECT_ID string +func ProjectIDSanitizer(t testing.TB) Sanitizer { + return func(s string) string { + projectID := gcloud.Run(t, "config get-value project") + if projectID.String() == "[]" { + t.Logf("no project ID currently set, 
skipping ProjectIDSanitizer: %s", projectID.String()) + return s + } + return strings.ReplaceAll(s, projectID.String(), "PROJECT_ID") + } +} + +type goldenFileOption func(*GoldenFile) + +func WithDir(dir string) goldenFileOption { + return func(g *GoldenFile) { + g.dir = dir + } +} + +func WithFileName(fn string) goldenFileOption { + return func(g *GoldenFile) { + g.fileName = fn + } +} + +func WithSanitizer(s Sanitizer) goldenFileOption { + return func(g *GoldenFile) { + g.sanitizers = append(g.sanitizers, s) + } +} + +func WithStringSanitizer(old, new string) goldenFileOption { + return func(g *GoldenFile) { + g.sanitizers = append(g.sanitizers, StringSanitizer(old, new)) + } +} + +func NewOrUpdate(t testing.TB, data string, opts ...goldenFileOption) *GoldenFile { + g := &GoldenFile{ + dir: gfDir, + fileName: fmt.Sprintf("%s.json", strings.ReplaceAll(t.Name(), "/", "-")), + sanitizers: []Sanitizer{ProjectIDSanitizer(t)}, + t: t, + } + for _, opt := range opts { + opt(g) + } + g.update(data) + return g +} + +// update updates goldenfile data iff gfUpdateEnvVar is true +func (g *GoldenFile) update(data string) { + // exit early if gfUpdateEnvVar is not set or true + if strings.ToLower(os.Getenv(gfUpdateEnvVar)) != "true" { + return + } + fp := g.GetName() + err := os.MkdirAll(path.Dir(fp), gfPerms) + if err != nil { + g.t.Fatalf("error updating result: %v", err) + } + // apply sanitizers on data + data = g.ApplySanitizers(data) + + err = os.WriteFile(fp, []byte(data), gfPerms) + if err != nil { + g.t.Fatalf("error updating result: %v", err) + } +} + +// GetName return path of the goldenfile +func (g *GoldenFile) GetName() string { + return path.Join(g.dir, g.fileName) +} + +// ApplySanitizers returns sanitized string +func (g *GoldenFile) ApplySanitizers(s string) string { + for _, sanitizer := range g.sanitizers { + s = sanitizer(s) + } + return s +} + +// GetSanitizedJSON returns sanitizes and returns JSON result +func (g *GoldenFile) GetSanitizedJSON(s 
gjson.Result) gjson.Result { + resultStr := s.String() + resultStr = g.ApplySanitizers(resultStr) + return utils.ParseJSONResult(g.t, resultStr) +} + +// GetJSON returns goldenfile as parsed json +func (g *GoldenFile) GetJSON() gjson.Result { + return utils.LoadJSON(g.t, g.GetName()) +} + +// JSONEq asserts that json content in jsonPath for got and goldenfile is the same +func (g *GoldenFile) JSONEq(a *assert.Assertions, got gjson.Result, jsonPath string) { + gf := g.GetJSON() + getPath := fmt.Sprintf("%s|@ugly", jsonPath) + gotData := g.ApplySanitizers(got.Get(getPath).String()) + gfData := gf.Get(getPath).String() + a.Equalf(gfData, gotData, "For path %q expected %q to match fixture %q", jsonPath, gotData, gfData) +} + +// JSONPathEqs asserts that json content in jsonPaths for got and goldenfile are the same +func (g *GoldenFile) JSONPathEqs(a *assert.Assertions, got gjson.Result, jsonPaths []string) { + syncGroup := new(errgroup.Group) + syncGroup.SetLimit(gGoroutinesMax) + g.t.Logf("Checking %d JSON paths with max %d goroutines", len(jsonPaths), gGoroutinesMax) + for _, jsonPath := range jsonPaths { + jsonPath := jsonPath + syncGroup.Go(func() error { + g.JSONEq(a, got, jsonPath) + return nil + }) + } + if err := syncGroup.Wait(); err != nil { + g.t.Fatal(err) + } +} diff --git a/infra/blueprint-test/pkg/golden/golden_test.go b/infra/blueprint-test/pkg/golden/golden_test.go new file mode 100644 index 00000000000..c5466b884f1 --- /dev/null +++ b/infra/blueprint-test/pkg/golden/golden_test.go @@ -0,0 +1,203 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless assertd by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package golden + +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" + "github.com/stretchr/testify/assert" + + gotest "github.com/mitchellh/go-testing-interface" +) + +const testProjectID = "foo" + +func TestUpdate(t *testing.T) { + tests := []struct { + name string + data string + skipUpdate bool + want string + }{ + { + name: "simple", + data: "foo", + want: "foo", + }, + { + name: "with-prev-data", + data: "{\"baz\":\"qux\"}", + skipUpdate: true, + want: "{\"foo\":\"bar\"}", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + if !tt.skipUpdate { + os.Setenv(gfUpdateEnvVar, "true") + defer os.Unsetenv(gfUpdateEnvVar) + } + + got := NewOrUpdate(t, tt.data) + + if !tt.skipUpdate { + defer os.Remove(got.GetName()) + } + j, err := os.ReadFile(got.GetName()) + assert.NoError(err) + assert.Equal(tt.want, string(j)) + + }) + } +} + +func TestJSONEq(t *testing.T) { + tests := []struct { + name string + data string + eqPath string + opts []goldenFileOption + want string + setProjectID bool + }{ + { + name: "nested", + data: "{\"foo\":\"bar\",\"baz\":{\"qux\":\"quz\"}}", + eqPath: "baz", + want: "{\"qux\":\"quz\"}", + }, + { + name: "empty path", + data: "{\"foo\":\"bar\",\"baz\":{\"qux\":\"quz\"}}", + eqPath: "cookie", + want: "", + }, + { + name: "sanitize quz", + data: "{\"foo\":\"bar\",\"baz\":{\"qux\":\"quz\"}}", 
+ opts: []goldenFileOption{WithSanitizer(StringSanitizer("quz", "REPLACED"))}, + eqPath: "baz", + want: "{\"qux\":\"REPLACED\"}", + }, + { + name: "sanitize projectID", + data: fmt.Sprintf("{\"foo\":\"bar\",\"baz\":{\"qux\":\"%s\"}}", testProjectID), + opts: []goldenFileOption{WithSanitizer(ProjectIDSanitizer(t))}, + setProjectID: true, + eqPath: "baz", + want: "{\"qux\":\"PROJECT_ID\"}", + }, + { + name: "no gcloud projectID set", + data: fmt.Sprintf("{\"foo\":\"bar\",\"baz\":{\"qux\":\"%s\"}}", testProjectID), + opts: []goldenFileOption{WithSanitizer(ProjectIDSanitizer(t))}, + eqPath: "baz", + want: fmt.Sprintf("{\"qux\":\"%s\"}", testProjectID), + }, + { + name: "multiple sanitizers quz", + data: "{\"foo\":\"bar\",\"baz\":{\"qux\":\"quz\",\"quux\":\"quuz\"}}", + opts: []goldenFileOption{ + WithSanitizer(StringSanitizer("quz", "REPLACED")), + WithSanitizer(func(s string) string { return strings.ReplaceAll(s, "quuz", "NEW") }), + }, + eqPath: "baz", + want: "{\"qux\":\"REPLACED\",\"quux\":\"NEW\"}", + }, + { + name: "diff_whitespace", + data: "{\"list\":[\n \"SYSTEM_COMPONENTS\",\n \"POD\",\n \"DAEMONSET\",\n \"DEPLOYMENT\",\n \"STATEFULSET\",\n \"STORAGE\",\n \"HPA\",\n \"CADVISOR\",\n \"KUBELET\"\n ]}", + eqPath: "list", + want: "[\"SYSTEM_COMPONENTS\",\"POD\",\"DAEMONSET\",\"DEPLOYMENT\",\"STATEFULSET\",\"STORAGE\",\"HPA\",\"CADVISOR\",\"KUBELET\"]", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + if tt.setProjectID { + gcloud.Runf(t, "config set project %s", testProjectID) + defer gcloud.Run(t, "config unset project") + } + os.Setenv(gfUpdateEnvVar, "true") + defer os.Unsetenv(gfUpdateEnvVar) + got := NewOrUpdate(t, tt.data, tt.opts...) 
+ defer os.Remove(got.GetName()) + got.JSONEq(assert, utils.ParseJSONResult(t, tt.data), tt.eqPath) + assert.Equal(tt.want, got.GetJSON().Get(tt.eqPath).Get("@ugly").String()) + }) + } +} + +func TestJSONEqs(t *testing.T) { + tests := []struct { + name string + data string + eqPaths []string + opts []goldenFileOption + want string + hasError bool + }{ + { + name: "simple", + data: "{\"foo\":\"bar\",\"baz\":{\"qux\":\"quz\"},\"fizz\":\"pop\"}", + eqPaths: []string{"foo","baz"}, + want: "{\"foo\":\"bar\",\"baz\":{\"qux\":\"quz\"}}", + hasError: false, + }, + { + name: "simple space diff", + data: "{\"foo\":\"bar\",\"baz\":{\"qux\":\"quz\"},\"fizz\":\"pop\"}", + eqPaths: []string{"foo","baz"}, + want: "{ \"foo\":\"bar\",\"baz\":{\"qux\":\"quz\"}}", + hasError: false, + }, + { + name: "simple order diff", + data: "{\"foo\":\"bar\",\"baz\":{\"qux\":\"quz\"},\"fizz\":\"pop\"}", + eqPaths: []string{"foo","baz"}, + want: "{\"baz\":{\"qux\":\"quz\"},\"foo\":\"bar\",\"foo\":\"bar\"}", + hasError: false, + }, + { + name: "false", + data: "{\"foo\":\"bar\",\"baz\":{\"qux\":\"quz\"},\"fizz\":\"pop\"}", + eqPaths: []string{"foo","baz"}, + want: "{\"foo\":\"bar\",\"baz\":{\"qux\":\"quz1\"}}", + hasError: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + innerT := &gotest.RuntimeT{} + innerAssert := assert.New(innerT) + os.Setenv(gfUpdateEnvVar, "true") + defer os.Unsetenv(gfUpdateEnvVar) + got := NewOrUpdate(t, tt.data, tt.opts...) 
+ defer os.Remove(got.GetName()) + got.JSONPathEqs(innerAssert, utils.ParseJSONResult(t, tt.want), tt.eqPaths) + + assert := assert.New(t) + assert.True(innerT.Failed() == tt.hasError) + }) + } +} diff --git a/infra/blueprint-test/pkg/golden/testdata/TestUpdate-with-prev-data.json b/infra/blueprint-test/pkg/golden/testdata/TestUpdate-with-prev-data.json new file mode 100755 index 00000000000..9f5dd4e3d9f --- /dev/null +++ b/infra/blueprint-test/pkg/golden/testdata/TestUpdate-with-prev-data.json @@ -0,0 +1 @@ +{"foo":"bar"} \ No newline at end of file diff --git a/infra/blueprint-test/pkg/kpt/kpt.go b/infra/blueprint-test/pkg/kpt/kpt.go index efa2eaa1b73..d6c3c4b97d6 100644 --- a/infra/blueprint-test/pkg/kpt/kpt.go +++ b/infra/blueprint-test/pkg/kpt/kpt.go @@ -1,17 +1,29 @@ package kpt import ( + "fmt" + "time" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" + kptfilev1 "github.com/GoogleContainerTools/kpt-functions-sdk/go/api/kptfile/v1" + kptutil "github.com/GoogleContainerTools/kpt-functions-sdk/go/api/util" "github.com/gruntwork-io/terratest/modules/logger" + "github.com/gruntwork-io/terratest/modules/retry" "github.com/gruntwork-io/terratest/modules/shell" "github.com/mitchellh/go-testing-interface" + "sigs.k8s.io/kustomize/kyaml/kio/kioutil" + "sigs.k8s.io/kustomize/kyaml/yaml" ) +// MIN_KPT_VERSION format: vMAJOR[.MINOR[.PATCH[-PRERELEASE]]] +const MIN_KPT_VERSION = "v1.0.0-beta.16" + type CmdCfg struct { kptBinary string // kpt binary dir string // dir to execute commands in logger *logger.Logger // custom logger t testing.TB // TestingT or TestingB + tries int // qty to try kpt command, default: 3 } type cmdOption func(*CmdCfg) @@ -39,6 +51,7 @@ func NewCmdConfig(t testing.TB, opts ...cmdOption) *CmdCfg { kOpts := &CmdCfg{ logger: utils.GetLoggerFromT(), t: t, + tries: 3, } // apply options for _, opt := range opts { @@ -51,6 +64,11 @@ func NewCmdConfig(t testing.TB, opts ...cmdOption) *CmdCfg { } kOpts.kptBinary = 
"kpt" } + // Validate required KPT version + if err := utils.MinSemver("v"+GetKptVersion(t, kOpts.kptBinary), MIN_KPT_VERSION); err != nil { + t.Fatalf("unable to validate minimum required kpt version: %v", err) + } + return kOpts } @@ -61,9 +79,41 @@ func (k *CmdCfg) RunCmd(args ...string) string { Logger: k.logger, WorkingDir: k.dir, } - op, err := shell.RunCommandAndGetStdOutE(k.t, kptCmd) + command := func() (string, error) { + return shell.RunCommandAndGetStdOutE(k.t, kptCmd) + } + op, err := retry.DoWithRetryE(k.t, fmt.Sprintf("kpt %v", kptCmd.Args), k.tries, 15*time.Second, command) if err != nil { k.t.Fatal(err) } return op } + +// findKptfile discovers Kptfile of the root package from slice of nodes +func findKptfile(nodes []*yaml.RNode) (*kptfilev1.KptFile, error) { + for _, node := range nodes { + if node.GetAnnotations()[kioutil.PathAnnotation] == kptfilev1.KptFileName { + s, err := node.String() + if err != nil { + return nil, fmt.Errorf("unable to read Kptfile: %v", err) + } + kf, err := kptutil.DecodeKptfile(s) + if err != nil { + return nil, fmt.Errorf("unable to decode Kptfile: %v", err) + } + return kf, nil + } + } + return nil, fmt.Errorf("unable to find Kptfile, please include --include-meta-resources flag if a Kptfile is present") +} + +// GetKptVersion gets the version of kptBinary +func GetKptVersion(t testing.TB, kptBinary string) string { + kVersionOpts := &CmdCfg{ + kptBinary: kptBinary, + dir: utils.GetWD(t), + logger: utils.GetLoggerFromT(), + t: t, + } + return kVersionOpts.RunCmd("version") +} diff --git a/infra/blueprint-test/pkg/kpt/setters.go b/infra/blueprint-test/pkg/kpt/setters.go index ebc90d2e177..3aae1b299c8 100644 --- a/infra/blueprint-test/pkg/kpt/setters.go +++ b/infra/blueprint-test/pkg/kpt/setters.go @@ -5,7 +5,6 @@ import ( "os" "strings" - "github.com/GoogleContainerTools/kpt-functions-catalog/functions/go/list-setters/listsetters" "sigs.k8s.io/kustomize/kyaml/kio" "sigs.k8s.io/kustomize/kyaml/kio/kioutil" 
"sigs.k8s.io/kustomize/kyaml/yaml" @@ -13,7 +12,7 @@ import ( // UpsertSetters inserts or updates setters if apply-setters fn config is discovered. func UpsertSetters(nodes []*yaml.RNode, setters map[string]string) error { - kf, err := listsetters.FindKptfile(nodes) + kf, err := findKptfile(nodes) if err != nil { return err } @@ -42,7 +41,7 @@ func UpsertSetters(nodes []*yaml.RNode, setters map[string]string) error { return nil } -//findSetterNode finds setter node from a slice of nodes. +// findSetterNode finds setter node from a slice of nodes. func findSetterNode(nodes []*yaml.RNode, path string) (*yaml.RNode, error) { for _, node := range nodes { np := node.GetAnnotations()[kioutil.PathAnnotation] diff --git a/infra/blueprint-test/pkg/kpt/status.go b/infra/blueprint-test/pkg/kpt/status.go index 61cef8af361..0d4ce2f57ae 100644 --- a/infra/blueprint-test/pkg/kpt/status.go +++ b/infra/blueprint-test/pkg/kpt/status.go @@ -7,40 +7,36 @@ import ( ) const ( - // Individual apply events can have either of resourceApplied or resourceFailed status. - // https://github.com/GoogleContainerTools/kpt/blob/2a817f60cf7132c88fd2e526c02b800cf927c048/thirdparty/cli-utils/printers/json/formatter.go#L31 - ApplyType = "apply" - ResourceAppliedEventType = "resourceApplied" - ResourceFailedEventType = "resourceFailed" - // Unchanged operation represents a resource that remained unchanged. 
- ResourceOperationUnchanged = "Unchanged" - // Group event of type apply has completed status - CompletedEventType = "completed" + // Resource event of type apply has apply type + ResourceApplyType = "apply" + // Status of successful resource event + ResourceOperationSuccessful = "Successful" + // Group event of type apply has summary type + CompletedEventType = "summary" ) type ResourceApplyStatus struct { - EventType string `json:"eventType"` Group string `json:"group,omitempty"` Kind string `json:"kind,omitempty"` Name string `json:"name,omitempty"` Namespace string `json:"namespace,omitempty"` - Operation string `json:"operation"` + Status string `json:"status"` + Timestamp string `json:"timestamp"` Type string `json:"type"` } type GroupApplyStatus struct { - EventType string `json:"eventType"` - Count int `json:"count"` - CreatedCount int `json:"createdCount"` - UnchangedCount int `json:"unchangedCount"` - ConfiguredCount int `json:"configuredCount"` - FailedCount int `json:"failedCount"` - ServerSideCount int `json:"serverSideCount"` - Operation string `json:"operation"` - Type string `json:"type"` + Action string `json:"action"` + Count int `json:"count"` + Failed int `json:"failed"` + Skipped int `json:"skipped"` + Status string `json:"status"` + Successful int `json:"successful"` + Timestamp string `json:"timestamp"` + Type string `json:"type"` } -// GetPkgApplyResourcesStatus finds individual kpt apply statuses from newline seperated string of apply statuses +// GetPkgApplyResourcesStatus finds individual kpt apply statuses from newline separated string of apply statuses // and converts them into a slice of ResourceApplyStatus. 
func GetPkgApplyResourcesStatus(jsonStatus string) ([]ResourceApplyStatus, error) { var statuses []ResourceApplyStatus @@ -51,7 +47,7 @@ func GetPkgApplyResourcesStatus(jsonStatus string) ([]ResourceApplyStatus, error if err != nil { return nil, fmt.Errorf("error unmarshalling %s: %v", status, err) } - if s.Type == ApplyType && (s.EventType == ResourceAppliedEventType || s.EventType == ResourceFailedEventType) { + if s.Type == ResourceApplyType { statuses = append(statuses, s) } @@ -59,7 +55,7 @@ func GetPkgApplyResourcesStatus(jsonStatus string) ([]ResourceApplyStatus, error return statuses, nil } -// GetPkgApplyGroupStatus finds the first group kpt apply status from newline seperated string of apply statuses +// GetPkgApplyGroupStatus finds the first group kpt apply status from newline separated string of apply statuses // and converts it into a GroupApplyStatus. func GetPkgApplyGroupStatus(jsonStatus string) (GroupApplyStatus, error) { var s GroupApplyStatus @@ -70,7 +66,7 @@ func GetPkgApplyGroupStatus(jsonStatus string) (GroupApplyStatus, error) { if err != nil { return s, fmt.Errorf("error unmarshalling %s: %v", resourceStatuses[i], err) } - if s.Type == ApplyType && (s.EventType == CompletedEventType) { + if s.Type == CompletedEventType { return s, nil } } diff --git a/infra/blueprint-test/pkg/krmt/krm.go b/infra/blueprint-test/pkg/krmt/krm.go index 84ae36d39de..b03f43a6ad4 100644 --- a/infra/blueprint-test/pkg/krmt/krm.go +++ b/infra/blueprint-test/pkg/krmt/krm.go @@ -30,6 +30,7 @@ type KRMBlueprintTest struct { discovery.BlueprintTestConfig // additional blueprint test configs name string // descriptive name for the test exampleDir string // directory containing KRM blueprint example + additionalResources []string // paths to directories or files containing additional resources to be applied buildDir string // directory to hydrated blueprint configs pre apply kpt *kpt.CmdCfg // kpt cmd config timeout string // timeout for KRM resource status @@ -58,6 
+59,12 @@ func WithDir(dir string) krmtOption { } } +func WithAdditionalResources(rscs ...string) krmtOption { + return func(f *KRMBlueprintTest) { + f.additionalResources = append(f.additionalResources, rscs...) + } +} + func WithBuildDir(buildDir string) krmtOption { return func(f *KRMBlueprintTest) { f.buildDir = buildDir @@ -123,16 +130,22 @@ func NewKRMBlueprintTest(t testing.TB, opts ...krmtOption) *KRMBlueprintTest { t.Fatalf("Dir path %s does not exist", krmt.exampleDir) } } else { - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("unable to get wd :%v", err) - } - exampleDir, err := discovery.GetConfigDirFromTestDir(cwd) + exampleDir, err := discovery.GetConfigDirFromTestDir(utils.GetWD(t)) if err != nil { t.Fatalf("unable to detect KRM dir :%v", err) } krmt.exampleDir = exampleDir } + // if explicit resourcesDir is provided, validate it. + if len(krmt.additionalResources) != 0 { + for _, path := range krmt.additionalResources { + _, err := os.Stat(path) + if os.IsNotExist(err) { + t.Fatalf("Path for additional resources %s does not exist", path) + } + } + } + // discover test config var err error krmt.BlueprintTestConfig, err = discovery.GetTestConfig(path.Join(krmt.exampleDir, discovery.DefaultTestConfigFilename)) @@ -154,12 +167,8 @@ func NewKRMBlueprintTest(t testing.TB, opts ...krmtOption) *KRMBlueprintTest { // getDefaultBuildDir returns a temporary build directory for hydrated configs. 
func (b *KRMBlueprintTest) getDefaultBuildDir() string { - cwd, err := os.Getwd() - if err != nil { - b.t.Fatalf("unable to get wd :%v", err) - } - buildDir := path.Join(cwd, tmpBuildDir) - err = os.MkdirAll(buildDir, 0755) + buildDir := path.Join(utils.GetWD(b.t), tmpBuildDir) + err := os.MkdirAll(buildDir, 0755) if err != nil { b.t.Fatalf("unable to create %s :%v", buildDir, err) } @@ -201,6 +210,15 @@ func (b *KRMBlueprintTest) setupBuildDir() { if err != nil { b.t.Fatalf("unable to copy %s to %s :%v", b.exampleDir, b.buildDir, err) } + // copy over additional resources into build dir, if present + if len(b.additionalResources) != 0 { + for _, path := range b.additionalResources { + err = copy.Copy(path, b.buildDir) + if err != nil { + b.t.Fatalf("unable to copy %s to %s :%v", path, b.buildDir, err) + } + } + } // subsequent kpt pkg update requires a clean git repo without uncommitted changes // init a new git repo in build dir and commit changes git := git.NewCmdConfig(b.t, git.WithDir(b.buildDir)) @@ -215,7 +233,9 @@ func (b *KRMBlueprintTest) updateSetters() { if err != nil { b.t.Fatalf("unable to read resources in %s :%v", b.buildDir, err) } - kpt.UpsertSetters(rs, b.setters) + if err := kpt.UpsertSetters(rs, b.setters); err != nil { + b.t.Fatalf("unable to upsert setters in %s :%v", b.buildDir, err) + } err = kpt.WritePkgResources(b.buildDir, rs) if err != nil { b.t.Fatalf("unable to write resources in %s :%v", b.buildDir, err) @@ -258,21 +278,21 @@ func (b *KRMBlueprintTest) DefaultApply(assert *assert.Assertions) { b.kpt.RunCmd("live", "status", "--output", "json", "--poll-until", "current", "--timeout", b.timeout) } -// DefaultVerify asserts no resource changes exist after apply. 
+// DefaultVerify asserts all resources are status successful func (b *KRMBlueprintTest) DefaultVerify(assert *assert.Assertions) { jsonOp := b.kpt.RunCmd("live", "apply", "--output", "json") - // assert each resource is unchanged from initial apply + // assert each resource status is successful resourceStatus, err := kpt.GetPkgApplyResourcesStatus(jsonOp) assert.NoError(err, "Resource statuses should be parsable") for _, r := range resourceStatus { - assert.Equal(kpt.ResourceOperationUnchanged, r.Operation, "Resource should be unchanged") + assert.Equal(kpt.ResourceOperationSuccessful, r.Status, "Status should be successful") } - // assert count of resources applied equals count of resources unchanged + // assert count of resources applied equals count of resources successful groupStatus, err := kpt.GetPkgApplyGroupStatus(jsonOp) assert.NoError(err, "Group status should be parsable") - assert.Equal(groupStatus.Count, groupStatus.UnchangedCount, "All resources should be unchanged") + assert.Equal(groupStatus.Count, groupStatus.Successful, "All resources should be successful") } @@ -284,7 +304,7 @@ func (b *KRMBlueprintTest) DefaultTeardown(assert *assert.Assertions) { // ShouldSkip checks if a test should be skipped func (b *KRMBlueprintTest) ShouldSkip() bool { - return b.Spec.Skip + return b.BlueprintTestConfig.Spec.Skip } // AutoDiscoverAndTest discovers KRM config from examples/fixtures and runs tests. @@ -341,7 +361,8 @@ func (b *KRMBlueprintTest) Teardown(assert *assert.Assertions) { // Test runs init, apply, verify, teardown in order for the blueprint. 
func (b *KRMBlueprintTest) Test() { if b.ShouldSkip() { - b.logger.Logf(b.t, "Skipping test due to config %s", b.Path) + b.logger.Logf(b.t, "Skipping test due to config %s", b.BlueprintTestConfig.Path) + b.t.SkipNow() return } a := assert.New(b.t) @@ -351,3 +372,12 @@ func (b *KRMBlueprintTest) Test() { utils.RunStage("apply", func() { b.Apply(a) }) utils.RunStage("verify", func() { b.Verify(a) }) } + +// GetBuildDir returns the temporary build dir created for hydrating config. Defaults to .build/test-name. +func (b *KRMBlueprintTest) GetBuildDir() string { + if b.buildDir == "" { + b.t.Fatalf("unable to get a valid build directory") + } + + return b.buildDir +} diff --git a/infra/blueprint-test/pkg/tft/terraform.go b/infra/blueprint-test/pkg/tft/terraform.go index 147753fdaca..89c20e44bba 100644 --- a/infra/blueprint-test/pkg/tft/terraform.go +++ b/infra/blueprint-test/pkg/tft/terraform.go @@ -1,5 +1,5 @@ /** - * Copyright 2021 Google LLC + * Copyright 2021-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,35 +18,70 @@ package tft import ( + b64 "encoding/base64" "fmt" "os" "path" + "path/filepath" "strings" - gotest "testing" + "time" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/discovery" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" + "github.com/alexflint/go-filemutex" "github.com/gruntwork-io/terratest/modules/logger" "github.com/gruntwork-io/terratest/modules/terraform" + "github.com/hashicorp/terraform-config-inspect/tfconfig" "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/assert" + "github.com/tidwall/gjson" +) + +const ( + setupKeyOutputName = "sa_key" + tftCacheMutexFilename = "bpt-tft-cache.lock" + planFilename = "plan.tfplan" +) + +var ( + CommonRetryableErrors = map[string]string{ + // Project deletion is eventually consistent. Even if google_project resources inside the folder are deleted there maybe a deletion error. + ".*FOLDER_TO_DELETE_NON_EMPTY_VIOLATION.*": "Failed to delete non empty folder.", + + // API activation is eventually consistent. Even if the google_project_service resource is reconciled there maybe an activation error. + ".*SERVICE_DISABLED.*": "Required API not enabled.", + } ) // TFBlueprintTest implements bpt.Blueprint and stores information associated with a Terraform blueprint test. 
type TFBlueprintTest struct { - discovery.BlueprintTestConfig // additional blueprint test configs - name string // descriptive name for the test - tfDir string // directory containing Terraform configs - tfEnvVars map[string]string // variables to pass to Terraform as environment variables prefixed with TF_VAR_ - setupDir string // optional directory containing applied TF configs to import outputs as variables for the test - vars map[string]interface{} // variables to pass to Terraform as flags - logger *logger.Logger // custom logger - t testing.TB // TestingT or TestingB - init func(*assert.Assertions) // init function - apply func(*assert.Assertions) // apply function - verify func(*assert.Assertions) // verify function - teardown func(*assert.Assertions) // teardown function + discovery.BlueprintTestConfig // additional blueprint test configs + name string // descriptive name for the test + saKey string // optional setup sa key + tfDir string // directory containing Terraform configs + tfEnvVars map[string]string // variables to pass to Terraform as environment variables prefixed with TF_VAR_ + backendConfig map[string]interface{} // backend configuration for terraform init + retryableTerraformErrors map[string]string // If Terraform apply fails with one of these (transient) errors, retry. The keys are a regexp to match against the error and the message is what to display to a user if that error is matched. 
+ maxRetries int // Maximum number of times to retry errors matching RetryableTerraformErrors + timeBetweenRetries time.Duration // The amount of time to wait between retries + migrateState bool // suppress user confirmation in a migration in terraform init + setupDir string // optional directory containing applied TF configs to import outputs as variables for the test + policyLibraryPath string // optional absolute path to directory containing policy library constraints + terraformVetProject string // optional a valid existing project that will be used when a plan has resources in a project that still does not exist. + vars map[string]interface{} // variables to pass to Terraform as flags + logger *logger.Logger // custom logger + sensitiveLogger *logger.Logger // custom logger for sensitive logging + t testing.TB // TestingT or TestingB + init func(*assert.Assertions) // init function + plan func(*terraform.PlanStruct, *assert.Assertions) // plan function + apply func(*assert.Assertions) // apply function + verify func(*assert.Assertions) // verify function + teardown func(*assert.Assertions) // teardown function + setupOutputOverrides map[string]interface{} // override outputs from the Setup phase + tftCacheMutex *filemutex.FileMutex // Mutex to protect Terraform plugin cache + parallelism int // Set the parallelism setting for Terraform } type tftOption func(*TFBlueprintTest) @@ -57,6 +92,12 @@ func WithName(name string) tftOption { } } +func WithSetupSaKey(saKey string) tftOption { + return func(f *TFBlueprintTest) { + f.saKey = saKey + } +} + func WithFixtureName(fixtureName string) tftOption { return func(f *TFBlueprintTest) { // when a test is invoked for an explicit blueprint fixture @@ -80,12 +121,34 @@ func WithEnvVars(envVars map[string]string) tftOption { } } +func WithBackendConfig(backendConfig map[string]interface{}) tftOption { + return func(f *TFBlueprintTest) { + f.backendConfig = backendConfig + f.migrateState = true + } +} + +func 
WithRetryableTerraformErrors(retryableTerraformErrors map[string]string, maxRetries int, timeBetweenRetries time.Duration) tftOption { + return func(f *TFBlueprintTest) { + f.retryableTerraformErrors = retryableTerraformErrors + f.maxRetries = maxRetries + f.timeBetweenRetries = timeBetweenRetries + } +} + func WithSetupPath(setupPath string) tftOption { return func(f *TFBlueprintTest) { f.setupDir = setupPath } } +func WithPolicyLibraryPath(policyLibraryPath, terraformVetProject string) tftOption { + return func(f *TFBlueprintTest) { + f.policyLibraryPath = policyLibraryPath + f.terraformVetProject = terraformVetProject + } +} + func WithVars(vars map[string]interface{}) tftOption { return func(f *TFBlueprintTest) { f.vars = vars @@ -98,15 +161,41 @@ func WithLogger(logger *logger.Logger) tftOption { } } +func WithSensitiveLogger(logger *logger.Logger) tftOption { + return func(f *TFBlueprintTest) { + f.sensitiveLogger = logger + } +} + +// WithSetupOutputs overrides output values from the setup stage +func WithSetupOutputs(vars map[string]interface{}) tftOption { + return func(f *TFBlueprintTest) { + f.setupOutputOverrides = vars + } +} + +func WithParallelism(p int) tftOption { + return func(f *TFBlueprintTest) { + f.parallelism = p + } +} + // NewTFBlueprintTest sets defaults, validates and returns a TFBlueprintTest. func NewTFBlueprintTest(t testing.TB, opts ...tftOption) *TFBlueprintTest { + var err error tft := &TFBlueprintTest{ name: fmt.Sprintf("%s TF Blueprint", t.Name()), tfEnvVars: make(map[string]string), t: t, } + // initiate tft cache file mutex + tft.tftCacheMutex, err = filemutex.New(filepath.Join(os.TempDir(), tftCacheMutexFilename)) + if err != nil { + t.Fatalf("tft lock file <%s> could not created: %v", filepath.Join(os.TempDir(), tftCacheMutexFilename), err) + } // default TF blueprint methods tft.init = tft.DefaultInit + // No default plan function, plan is skipped if no custom func provided. 
tft.apply = tft.DefaultApply tft.verify = tft.DefaultVerify tft.teardown = tft.DefaultTeardown @@ -118,6 +207,10 @@ func NewTFBlueprintTest(t testing.TB, opts ...tftOption) *TFBlueprintTest { if tft.logger == nil { tft.logger = utils.GetLoggerFromT() } + // If no custom sensitive logger, use discard logger. + if tft.sensitiveLogger == nil { + tft.sensitiveLogger = logger.Discard + } // if explicit tfDir is provided, validate it else try auto discovery if tft.tfDir != "" { _, err := os.Stat(tft.tfDir) @@ -125,18 +218,14 @@ func NewTFBlueprintTest(t testing.TB, opts ...tftOption) *TFBlueprintTest { t.Fatalf("TFDir path %s does not exist", tft.tfDir) } } else { - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("unable to get wd :%v", err) - } - tfdir, err := discovery.GetConfigDirFromTestDir(cwd) + tfdir, err := discovery.GetConfigDirFromTestDir(utils.GetWD(t)) if err != nil { t.Fatalf("unable to detect TFDir :%v", err) } tft.tfDir = tfdir } + // discover test config - var err error tft.BlueprintTestConfig, err = discovery.GetTestConfig(path.Join(tft.tfDir, discovery.DefaultTestConfigFilename)) if err != nil { t.Fatal(err) @@ -150,24 +239,91 @@ func NewTFBlueprintTest(t testing.TB, opts ...tftOption) *TFBlueprintTest { tft.setupDir = setupDir } } - //load TFEnvVars from setup outputs + // load setup sa Key + if tft.saKey != "" { + gcloud.ActivateCredsAndEnvVars(tft.t, tft.saKey) + } + // load TFEnvVars from setup outputs if tft.setupDir != "" { tft.logger.Logf(tft.t, "Loading env vars from setup %s", tft.setupDir) - loadTFEnvVar(tft.tfEnvVars, tft.getTFOutputsAsInputs(terraform.OutputAll(tft.t, &terraform.Options{TerraformDir: tft.setupDir, Logger: tft.logger}))) + outputs := tft.getOutputs(tft.sensitiveOutputs(tft.setupDir)) + loadTFEnvVar(tft.tfEnvVars, tft.getTFOutputsAsInputs(outputs)) + if credsEnc, exists := tft.tfEnvVars[fmt.Sprintf("TF_VAR_%s", setupKeyOutputName)]; tft.saKey == "" && exists { + if credDec, err := b64.StdEncoding.DecodeString(credsEnc); 
err == nil { + gcloud.ActivateCredsAndEnvVars(tft.t, string(credDec)) + } else { + tft.t.Fatalf("Unable to decode setup sa key: %v", err) + } + } else { + tft.logger.Logf(tft.t, "Skipping credential activation %s output from setup", setupKeyOutputName) + } + } + // Load env vars to supplement/override setup + tft.logger.Logf(tft.t, "Loading setup from environment") + if tft.setupOutputOverrides == nil { + tft.setupOutputOverrides = make(map[string]interface{}) + } + for k, v := range extractFromEnv("CFT_SETUP_") { + tft.setupOutputOverrides[k] = v } - tft.logger.Logf(tft.t, "Running tests TF configs in %s", tft.tfDir) + tftVersion := gjson.Get(terraform.RunTerraformCommand(tft.t, tft.GetTFOptions(), "version", "-json"), "terraform_version") + tft.logger.Logf(tft.t, "Running tests TF configs in %s with version %s", tft.tfDir, tftVersion) return tft } +// sensitiveOutputs returns a map of sensitive output keys for module in dir. +func (b *TFBlueprintTest) sensitiveOutputs(dir string) map[string]bool { + mod, err := tfconfig.LoadModule(dir) + if err != nil { + b.t.Fatalf("error loading module in %s: %v", dir, err) + } + sensitiveOP := map[string]bool{} + for _, op := range mod.Outputs { + if op.Sensitive { + sensitiveOP[op.Name] = true + } + } + return sensitiveOP +} + +// getOutputs returns all output values. +func (b *TFBlueprintTest) getOutputs(sensitive map[string]bool) map[string]interface{} { + // allow only parallel reads as Terraform plugin cache isn't concurrent safe + rUnlockFn := b.rLockFn() + defer rUnlockFn() + outputs := terraform.OutputAll(b.t, &terraform.Options{TerraformDir: b.setupDir, Logger: b.sensitiveLogger, NoColor: true}) + for k, v := range outputs { + _, s := sensitive[k] + if s { + b.sensitiveLogger.Logf(b.t, "output key %q: %v", k, v) + } else { + b.logger.Logf(b.t, "output key %q: %v", k, v) + } + } + return outputs +} + // GetTFOptions generates terraform.Options used by Terratest. 
func (b *TFBlueprintTest) GetTFOptions() *terraform.Options { - return terraform.WithDefaultRetryableErrors(b.t, &terraform.Options{ - TerraformDir: b.tfDir, - EnvVars: b.tfEnvVars, - Vars: b.vars, - Logger: b.logger, + newOptions := terraform.WithDefaultRetryableErrors(b.t, &terraform.Options{ + TerraformDir: b.tfDir, + EnvVars: b.tfEnvVars, + Vars: b.vars, + Logger: b.logger, + BackendConfig: b.backendConfig, + MigrateState: b.migrateState, + RetryableTerraformErrors: b.retryableTerraformErrors, + NoColor: true, + Parallelism: b.parallelism, }) + if b.maxRetries > 0 { + newOptions.MaxRetries = b.maxRetries + } + if b.timeBetweenRetries > 0 { + newOptions.TimeBetweenRetries = b.timeBetweenRetries + } + return newOptions } // getTFOutputsAsInputs computes a map of TF inputs from outputs map. @@ -204,16 +360,96 @@ func getKVFromOutputString(v string) (string, string, error) { // GetStringOutput returns TF output for a given key as string. // It fails test if given key does not output a primitive. func (b *TFBlueprintTest) GetStringOutput(name string) string { + // allow only parallel reads as Terraform plugin cache isn't concurrent safe + rUnlockFn := b.rLockFn() + defer rUnlockFn() return terraform.Output(b.t, b.GetTFOptions(), name) } +// GetStringOutputList returns TF output for a given key as list. +// It fails test if given key does not output a primitive. +// +// Deprecated: Use GetJsonOutput instead. +func (b *TFBlueprintTest) GetStringOutputList(name string) []string { + // allow only parallel reads as Terraform plugin cache isn't concurrent safe + rUnlockFn := b.rLockFn() + defer rUnlockFn() + return terraform.OutputList(b.t, b.GetTFOptions(), name) +} + +// GetJsonOutput returns TF output for key as gjson.Result. +// An empty string for key can be used to return all values. +// It fails test on invalid JSON. 
+func (b *TFBlueprintTest) GetJsonOutput(key string) gjson.Result { + // allow only parallel reads as Terraform plugin cache isn't concurrent safe + rUnlockFn := b.rLockFn() + defer rUnlockFn() + + jsonString := terraform.OutputJson(b.t, b.GetTFOptions(), key) + if !gjson.Valid(jsonString) { + b.t.Fatalf("Invalid JSON: %s", jsonString) + } + + return gjson.Parse(jsonString) +} + // GetTFSetupOutputListVal returns TF output from setup for a given key as list. // It fails test if given key does not output a list type. func (b *TFBlueprintTest) GetTFSetupOutputListVal(key string) []string { + if v, ok := b.setupOutputOverrides[key]; ok { + if listval, ok := v.([]string); ok { + return listval + } else { + b.t.Fatalf("Setup Override %s is not a list value", key) + } + } if b.setupDir == "" { b.t.Fatal("Setup path not set") } - return terraform.OutputList(b.t, &terraform.Options{TerraformDir: b.setupDir, Logger: b.logger}, key) + // allow only parallel reads as Terraform plugin cache isn't concurrent safe + rUnlockFn := b.rLockFn() + defer rUnlockFn() + return terraform.OutputList(b.t, &terraform.Options{TerraformDir: b.setupDir, Logger: b.logger, NoColor: true}, key) +} + +// GetTFSetupStringOutput returns TF setup output for a given key as string. +// It fails test if given key does not output a primitive or if setupDir is not configured. +func (b *TFBlueprintTest) GetTFSetupStringOutput(key string) string { + if v, ok := b.setupOutputOverrides[key]; ok { + return v.(string) + } + if b.setupDir == "" { + b.t.Fatal("Setup path not set") + } + // allow only parallel reads as Terraform plugin cache isn't concurrent safe + rUnlockFn := b.rLockFn() + defer rUnlockFn() + return terraform.Output(b.t, &terraform.Options{TerraformDir: b.setupDir, Logger: b.logger, NoColor: true}, key) +} + +// GetTFSetupJsonOutput returns TF setup output for a given key as gjson.Result. +// An empty string for key can be used to return all values. 
+// It fails test if given key does not output valid JSON or if setupDir is not configured. +func (b *TFBlueprintTest) GetTFSetupJsonOutput(key string) gjson.Result { + if v, ok := b.setupOutputOverrides[key]; ok { + if !gjson.Valid(v.(string)) { + b.t.Fatalf("Invalid JSON in setup output override: %s", v) + } + return gjson.Parse(v.(string)) + } + if b.setupDir == "" { + b.t.Fatal("Setup path not set") + } + // allow only parallel reads as Terraform plugin cache isn't concurrent safe + rUnlockFn := b.rLockFn() + defer rUnlockFn() + + jsonString := terraform.OutputJson(b.t, &terraform.Options{TerraformDir: b.setupDir, Logger: b.logger, NoColor: true}, key) + if !gjson.Valid(jsonString) { + b.t.Fatalf("Invalid JSON: %s", jsonString) + } + + return gjson.Parse(jsonString) } // loadTFEnvVar adds new env variables prefixed with TF_VAR_ to an existing map of variables. @@ -223,9 +459,32 @@ func loadTFEnvVar(m map[string]string, new map[string]string) { } } +// extractFromEnv parses environment variables with the given prefix, and returns a key-value map. +// e.g. CFT_SETUP_key=value returns map[string]string{"key": "value"} +func extractFromEnv(prefix string) map[string]interface{} { + r := make(map[string]interface{}) + for _, s := range os.Environ() { + k, v, ok := strings.Cut(s, "=") + if !ok { + // skip malformed entries in os.Environ + continue + } + // For env vars with the prefix, extract the key and value + if setupvar, ok := strings.CutPrefix(k, prefix); ok { + r[setupvar] = v + } + } + return r +} + // ShouldSkip checks if a test should be skipped func (b *TFBlueprintTest) ShouldSkip() bool { - return b.Spec.Skip + return b.BlueprintTestConfig.Spec.Skip +} + +// shouldRunTerraformVet checks if terraform vet should be executed +func (b *TFBlueprintTest) shouldRunTerraformVet() bool { + return b.policyLibraryPath != "" } // AutoDiscoverAndTest discovers TF config from examples/fixtures and runs tests. 
@@ -244,6 +503,11 @@ func (b *TFBlueprintTest) DefineInit(init func(*assert.Assertions)) { b.init = init } +// DefinePlan defines a custom plan function for the blueprint. +func (b *TFBlueprintTest) DefinePlan(plan func(*terraform.PlanStruct, *assert.Assertions)) { + b.plan = plan +} + // DefineApply defines a custom apply function for the blueprint. func (b *TFBlueprintTest) DefineApply(apply func(*assert.Assertions)) { b.apply = apply @@ -270,7 +534,7 @@ func (b *TFBlueprintTest) DefaultVerify(assert *assert.Assertions) { // exit code 0 is success with no diffs, 2 is success with non-empty diff // https://www.terraform.io/docs/cli/commands/plan.html#detailed-exitcode assert.NotEqual(1, e, "plan after apply should not fail") - assert.NotEqual(2, e, "plan after apply should have non-empty diff") + assert.NotEqual(2, e, "plan after apply should have no diff") } // DefaultInit runs TF init and validate on a blueprint. @@ -281,44 +545,176 @@ func (b *TFBlueprintTest) DefaultInit(assert *assert.Assertions) { terraform.Validate(b.t, terraform.WithDefaultRetryableErrors(b.t, &terraform.Options{ TerraformDir: b.tfDir, Logger: b.logger, + NoColor: true, })) } +// Vet runs TF plan, TF show, and gcloud terraform vet on a blueprint. +func (b *TFBlueprintTest) Vet(assert *assert.Assertions) { + jsonPlan, _ := b.PlanAndShow() + filepath, err := utils.WriteTmpFileWithExtension(jsonPlan, "json") + assert.NoError(err) + defer func() { + if err := os.Remove(filepath); err != nil { + b.t.Fatalf("Could not remove plan json: %v", err) + } + }() + results := gcloud.TFVet(b.t, filepath, b.policyLibraryPath, b.terraformVetProject).Array() + assert.Empty(results, "Should have no Terraform Vet violations") +} + // DefaultApply runs TF apply on a blueprint. func (b *TFBlueprintTest) DefaultApply(assert *assert.Assertions) { + if b.shouldRunTerraformVet() { + b.Vet(assert) + } terraform.Apply(b.t, b.GetTFOptions()) } // Init runs the default or custom init function for the blueprint. 
func (b *TFBlueprintTest) Init(assert *assert.Assertions) { + // allow only single write as Terraform plugin cache isn't concurrent safe + if err := b.tftCacheMutex.Lock(); err != nil { + b.t.Fatalf("Could not acquire lock: %v", err) + } + defer func() { + if err := b.tftCacheMutex.Unlock(); err != nil { + b.t.Fatalf("Could not release lock: %v", err) + } + }() b.init(assert) } +// PlanAndShow performs a Terraform plan, show and returns the parsed plan output. +func (b *TFBlueprintTest) PlanAndShow() (string, *terraform.PlanStruct) { + tDir, err := os.MkdirTemp(os.TempDir(), "btp") + if err != nil { + b.t.Fatalf("Temp directory %q could not created: %v", tDir, err) + } + defer func() { + if err := os.RemoveAll(tDir); err != nil { + b.t.Fatalf("Could not remove plan temp dir: %v", err) + } + }() + + planOpts := b.GetTFOptions() + planOpts.PlanFilePath = filepath.Join(tDir, planFilename) + rUnlockFn := b.rLockFn() + defer rUnlockFn() + terraform.Plan(b.t, planOpts) + // Logging show output is not useful since we log plan output above + // and show output is parsed and retured. + planOpts.Logger = logger.Discard + planJSON := terraform.Show(b.t, planOpts) + ps, err := terraform.ParsePlanJSON(planJSON) + assert.NoError(b.t, err) + return planJSON, ps +} + +// Plan runs the custom plan function for the blueprint. +// If not custom plan function is defined, this stage is skipped. +func (b *TFBlueprintTest) Plan(assert *assert.Assertions) { + if b.plan == nil { + b.logger.Logf(b.t, "skipping plan as no function defined") + return + } + _, ps := b.PlanAndShow() + b.plan(ps, assert) +} + // Apply runs the default or custom apply function for the blueprint. func (b *TFBlueprintTest) Apply(assert *assert.Assertions) { + // allow only parallel reads as Terraform plugin cache isn't concurrent safe + rUnlockFn := b.rLockFn() + defer rUnlockFn() b.apply(assert) } // Verify runs the default or custom verify function for the blueprint. 
func (b *TFBlueprintTest) Verify(assert *assert.Assertions) { + // allow only parallel reads as Terraform plugin cache isn't concurrent safe + rUnlockFn := b.rLockFn() + defer rUnlockFn() b.verify(assert) } // Teardown runs the default or custom teardown function for the blueprint. func (b *TFBlueprintTest) Teardown(assert *assert.Assertions) { + // allow only parallel reads as Terraform plugin cache isn't concurrent safe + rUnlockFn := b.rLockFn() + defer rUnlockFn() b.teardown(assert) } +const ( + initStage = "init" + planStage = "plan" + applyStage = "apply" + verifyStage = "verify" + teardownStage = "teardown" +) + // Test runs init, apply, verify, teardown in order for the blueprint. func (b *TFBlueprintTest) Test() { if b.ShouldSkip() { - b.logger.Logf(b.t, "Skipping test due to config %s", b.Path) + b.logger.Logf(b.t, "Skipping test due to config %s", b.BlueprintTestConfig.Path) + b.t.SkipNow() return } a := assert.New(b.t) // run stages - utils.RunStage("init", func() { b.Init(a) }) - defer utils.RunStage("teardown", func() { b.Teardown(a) }) - utils.RunStage("apply", func() { b.Apply(a) }) - utils.RunStage("verify", func() { b.Verify(a) }) + utils.RunStage(initStage, func() { b.Init(a) }) + defer utils.RunStage(teardownStage, func() { b.Teardown(a) }) + utils.RunStage(planStage, func() { b.Plan(a) }) + utils.RunStage(applyStage, func() { b.Apply(a) }) + utils.RunStage(verifyStage, func() { b.Verify(a) }) +} + +// RedeployTest deploys the test n times in separate workspaces before teardown. 
+func (b *TFBlueprintTest) RedeployTest(n int, nVars map[int]map[string]interface{}) { + if n < 2 { + b.t.Fatalf("n should be 2 or greater but got: %d", n) + } + if b.ShouldSkip() { + b.logger.Logf(b.t, "Skipping test due to config %s", b.BlueprintTestConfig.Path) + b.t.SkipNow() + return + } + a := assert.New(b.t) + // capture currently set vars as default if no override + defaultVars := b.vars + overrideVars := func(i int) { + custom, exists := nVars[i] + if exists { + b.vars = custom + } else { + b.vars = defaultVars + } + } + for i := 1; i <= n; i++ { + ws := terraform.WorkspaceSelectOrNew(b.t, b.GetTFOptions(), fmt.Sprintf("test-%d", i)) + overrideVars(i) + utils.RunStage(initStage, func() { b.Init(a) }) + defer func(i int) { + overrideVars(i) + terraform.WorkspaceSelectOrNew(b.t, b.GetTFOptions(), ws) + utils.RunStage(teardownStage, func() { b.Teardown(a) }) + }(i) + utils.RunStage(planStage, func() { b.Plan(a) }) + utils.RunStage(applyStage, func() { b.Apply(a) }) + utils.RunStage(verifyStage, func() { b.Verify(a) }) + } +} + +// rLockFn sets a read mutex lock, and returns the corresponding unlock function. 
+func (b *TFBlueprintTest) rLockFn() func() { + if err := b.tftCacheMutex.RLock(); err != nil { + b.t.Fatalf("Could not acquire read lock:%v", err) + } + + return func() { + if err := b.tftCacheMutex.RUnlock(); err != nil { + b.t.Fatalf("Could not release read lock: %v", err) + } + } } diff --git a/infra/blueprint-test/pkg/tft/terraform_test.go b/infra/blueprint-test/pkg/tft/terraform_test.go index 1f5fa41f092..7e2cc0cfd0c 100644 --- a/infra/blueprint-test/pkg/tft/terraform_test.go +++ b/infra/blueprint-test/pkg/tft/terraform_test.go @@ -17,7 +17,6 @@ package tft import ( - "io/ioutil" "os" "path" "testing" @@ -76,20 +75,28 @@ output "simple_map" { } } -func getTFOutputMap(t *testing.T, tf string) map[string]interface{} { +// newTestDir creates a new directory suitable for use as TFDir +func newTestDir(t *testing.T, pattern string, input string) string { t.Helper() assert := assert.New(t) // setup tf file - tfDir, err := ioutil.TempDir("", "") + tfDir, err := os.MkdirTemp("", pattern) assert.NoError(err) - defer os.RemoveAll(tfDir) tfFilePath := path.Join(tfDir, "test.tf") - err = ioutil.WriteFile(tfFilePath, []byte(tf), 0644) + err = os.WriteFile(tfFilePath, []byte(input), 0644) assert.NoError(err) + return tfDir +} + +func getTFOutputMap(t *testing.T, tf string) map[string]interface{} { + t.Helper() + + tfDir := newTestDir(t, "", tf) + defer os.RemoveAll(tfDir) // apply tf and get outputs - tOpts := &terraform.Options{TerraformDir: path.Dir(tfFilePath), Logger: logger.Discard} + tOpts := &terraform.Options{TerraformDir: tfDir, Logger: logger.Discard} terraform.Init(t, tOpts) terraform.Apply(t, tOpts) return terraform.OutputAll(t, tOpts) @@ -122,3 +129,146 @@ func TestGetKVFromOutputString(t *testing.T) { }) } } + +func TestSetupOverrideString(t *testing.T) { + tests := []struct { + name string + tfOutputs string + overrides map[string]interface{} + want map[string]string + }{ + {name: "no overrides", + tfOutputs: ` + output "simple_string" { + value = "foo" + } + 
+ output "simple_num" { + value = 1 + } + + output "simple_bool" { + value = true + } + `, + overrides: map[string]interface{}{}, + want: map[string]string{ + "simple_string": "foo", + "simple_num": "1", + "simple_bool": "true", + }, + }, + {name: "all overrides", + tfOutputs: ` + output "simple_string" { + value = "foo" + } + + output "simple_num" { + value = 1 + } + + output "simple_bool" { + value = true + } + `, + overrides: map[string]interface{}{ + "simple_string": "bar", + "simple_num": "2", + "simple_bool": "false", + }, + want: map[string]string{ + "simple_string": "bar", + "simple_num": "2", + "simple_bool": "false", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + emptyDir := newTestDir(t, "empty*", "") + setupDir := newTestDir(t, "setup-*", tt.tfOutputs) + defer os.RemoveAll(emptyDir) + defer os.RemoveAll(setupDir) + b := NewTFBlueprintTest(&testingiface.RuntimeT{}, + WithSetupOutputs(tt.overrides), + WithTFDir(emptyDir), + WithSetupPath(setupDir)) + // create outputs from setup + _, err := terraform.ApplyE(t, &terraform.Options{TerraformDir: setupDir}) + if err != nil { + t.Fatalf("Failed to apply setup: %v", err) + } + for k, want := range tt.want { + if b.GetTFSetupStringOutput(k) != want { + t.Errorf("unexpected string output for %s: want %s got %s", k, want, b.GetStringOutput(k)) + } + } + }) + } +} +func TestSetupOverrideList(t *testing.T) { + tests := []struct { + name string + tfOutputs string + overrides map[string]interface{} + want map[string][]string + }{ + {name: "no overrides", + tfOutputs: ` + output "simple_list" { + value = ["foo","bar"] + } + `, + overrides: map[string]interface{}{}, + want: map[string][]string{ + "simple_list": {"foo", "bar"}, + }, + }, + {name: "all overrides", + tfOutputs: ` + output "simple_list" { + value = ["foo","bar"] + } + `, + overrides: map[string]interface{}{ + "simple_list": []string{"apple", "orange"}, + }, + want: map[string][]string{ + "simple_list": {"apple", 
"orange"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + emptyDir := newTestDir(t, "empty*", "") + setupDir := newTestDir(t, "setup-*", tt.tfOutputs) + defer os.RemoveAll(emptyDir) + defer os.RemoveAll(setupDir) + b := NewTFBlueprintTest(&testingiface.RuntimeT{}, + WithSetupOutputs(tt.overrides), + WithTFDir(emptyDir), + WithSetupPath(setupDir)) + // create outputs from setup + _, err := terraform.ApplyE(t, &terraform.Options{TerraformDir: setupDir}) + if err != nil { + t.Fatalf("Failed to apply setup: %v", err) + } + for k, want := range tt.want { + got := b.GetTFSetupOutputListVal(k) + assert.ElementsMatchf(t, got, want, "list mismatch: want %s got %s", want) + } + }) + } + +} + +func TestSetupOverrideFromEnv(t *testing.T) { + t.Setenv("CFT_SETUP_my-key", "my-value") + emptyDir := newTestDir(t, "empty*", "") + defer os.RemoveAll(emptyDir) + b := NewTFBlueprintTest(&testingiface.RuntimeT{}, + WithTFDir(emptyDir)) + got := b.GetTFSetupStringOutput("my-key") + assert.Equal(t, got, "my-value") +} diff --git a/infra/blueprint-test/pkg/utils/asserthttp.go b/infra/blueprint-test/pkg/utils/asserthttp.go new file mode 100644 index 00000000000..ddf68a4c0a7 --- /dev/null +++ b/infra/blueprint-test/pkg/utils/asserthttp.go @@ -0,0 +1,214 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/mitchellh/go-testing-interface" +) + +// AssertHTTP provides a collection of HTTP asserts. +type AssertHTTP struct { + httpClient *http.Client + retryCount int + retryInterval time.Duration +} + +type assertOption func(*AssertHTTP) + +// WithHTTPClient specifies an HTTP client for the AssertHTTP use. +func WithHTTPClient(c *http.Client) assertOption { + return func(ah *AssertHTTP) { + ah.httpClient = c + } +} + +// WithHTTPRequestRetries specifies a HTTP request retry policy. +func WithHTTPRequestRetries(count int, interval time.Duration) assertOption { + return func(ah *AssertHTTP) { + ah.retryCount = count + ah.retryInterval = interval + } +} + +// NewAssertHTTP creates a new AssertHTTP with option overrides. +func NewAssertHTTP(opts ...assertOption) *AssertHTTP { + ah := &AssertHTTP{ + httpClient: http.DefaultClient, + retryCount: 3, + retryInterval: 2 * time.Second, + } + for _, opt := range opts { + opt(ah) + } + return ah +} + +// AssertSuccessWithRetry runs httpRequest and retries on errors outside client control. +func (ah *AssertHTTP) AssertSuccessWithRetry(t testing.TB, r *http.Request) { + t.Helper() + if ah.retryCount == 0 || ah.retryInterval == 0 { + ah.AssertSuccess(t, r) + return + } + + err := PollE(t, ah.httpRequest(t, r), ah.retryCount, ah.retryInterval) + if err != nil { + t.Error(err.Error()) + } +} + +// AssertSuccess runs httpRequest without retry. +func (ah *AssertHTTP) AssertSuccess(t testing.TB, r *http.Request) { + t.Helper() + _, err := ah.httpRequest(t, r)() + if err != nil { + t.Error(err) + } +} + +// AssertResponseWithRetry runs httpResponse and retries on errors outside client control. 
+func (ah *AssertHTTP) AssertResponseWithRetry(t testing.TB, r *http.Request, wantCode int, want ...string) { + t.Helper() + if ah.retryCount == 0 || ah.retryInterval == 0 { + ah.AssertSuccess(t, r) + return + } + + err := PollE(t, ah.httpResponse(t, r, wantCode, want...), ah.retryCount, ah.retryInterval) + if err != nil { + t.Error(err.Error()) + } +} + +// AssertResponse runs httpResponse without retry. +func (ah *AssertHTTP) AssertResponse(t testing.TB, r *http.Request, wantCode int, want ...string) { + t.Helper() + _, err := ah.httpResponse(t, r, wantCode, want...)() + if err != nil { + t.Error(err) + } +} + +// httpRequest verifies the request is successful by HTTP status code. +func (ah *AssertHTTP) httpRequest(t testing.TB, r *http.Request) func() (bool, error) { + t.Helper() + logger := GetLoggerFromT() + + return func() (bool, error) { + logger.Logf(t, "Sending HTTP Request %s %s", r.Method, r.URL.String()) + got, err := ah.httpClient.Do(r) + if err != nil { + return false, err + } + // Keep trying until the result is success or the request responsibility. + ok, retry := httpRetryCondition(got.StatusCode) + if !ok { + return retry, fmt.Errorf("want 2xx, got %d", got.StatusCode) + } + logger.Logf(t, "Successful HTTP Request %s %s", r.Method, r.URL.String()) + + return retry, nil + } +} + +// httpResponse verifies the requested response has the wanted status code and payload. +func (ah *AssertHTTP) httpResponse(t testing.TB, r *http.Request, wantCode int, want ...string) func() (bool, error) { + t.Helper() + logger := GetLoggerFromT() + + return func() (bool, error) { + t.Logf("Sending HTTP Request %s %s", r.Method, r.URL.String()) + got, err := ah.httpClient.Do(r) + if err != nil { + return false, err + } + defer got.Body.Close() + + // Determine if the request is successful, and if the response indicates + // we should attempt a retry. 
+ ok, retry := httpRetryCondition(got.StatusCode) + if ok { + logger.Logf(t, "Successful HTTP Request %s %s", r.Method, r.URL.String()) + } + + // e is the wrapped error for all expectation mismatches. + var e error + if got.StatusCode != wantCode { + e = errors.Join(e, fmt.Errorf("response code: got %d, want %d", got.StatusCode, wantCode)) + } + + // No further processing required. + if len(want) == 0 { + return false, e + } + + b, err := io.ReadAll(got.Body) + if err != nil { + return retry, errors.Join(e, err) + } + + if len(b) == 0 { + return retry, errors.Join(e, errors.New("empty response body")) + } + + out := string(b) + var bodyErr error + for _, fragment := range want { + if !strings.Contains(out, fragment) { + bodyErr = errors.Join(bodyErr, fmt.Errorf("response body does not contain %q", fragment)) + } + } + + // Only log errors and response body once. + if bodyErr != nil { + logger.Logf(t, "response output:") + logger.Logf(t, strings.TrimSpace(out)) + return retry, errors.Join(e, bodyErr) + } + + return retry, e + } +} + +// httpRetryCondition indicates retry should be attempted on HTTP 1xx, 401, 403, and 5xx errors. +// 401 and 403 are retried in case of lagging authorization configuration. +// First return value indicates successful response. +// Second return value, on true a retry is preferred. +func httpRetryCondition(code int) (bool, bool) { + switch { + case code >= http.StatusOK && code < http.StatusMultipleChoices: + return true, false + case code < http.StatusOK: + return false, false + case code >= http.StatusInternalServerError: + return false, true + // IAM & network configuration propagation is a source of delayed access. 
+ case code == http.StatusUnauthorized || code == http.StatusForbidden: + return false, true + case code >= http.StatusBadRequest: + return false, false + } + + return false, false +} diff --git a/infra/blueprint-test/pkg/utils/asserthttp_test.go b/infra/blueprint-test/pkg/utils/asserthttp_test.go new file mode 100644 index 00000000000..0c3d8d2745c --- /dev/null +++ b/infra/blueprint-test/pkg/utils/asserthttp_test.go @@ -0,0 +1,354 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils_test + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" +) + +func TestAssertSuccess(t *testing.T) { + tests := []struct { + label string + serverFunc func(t *testing.T) *httptest.Server + requestFunc func(t *testing.T, s *httptest.Server) *http.Request + assertFunc func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) + assertRetryFunc func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) + }{ + { + label: "success", + serverFunc: func(t *testing.T) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, "Hello World") + })) + }, + requestFunc: func(t *testing.T, s *httptest.Server) *http.Request { + r, err := http.NewRequest(http.MethodGet, s.URL, nil) + if err != nil { + t.Fatal(err) + } + return r + }, + assertFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertSuccess(it, r) + if it.err != nil { + t.Errorf("wanted success, got %v", it.err) + } + }, + assertRetryFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertSuccessWithRetry(it, r) + if it.err != nil { + t.Errorf("wanted success, got %v", it.err) + } + }, + }, + { + label: "request failure", + serverFunc: func(t *testing.T) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Not Available", http.StatusServiceUnavailable) + })) + }, + requestFunc: func(t *testing.T, s *httptest.Server) *http.Request { + r, err := http.NewRequest(http.MethodGet, "/nope", nil) + if err != nil { + t.Fatal(err) + } + return r + }, + assertFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertSuccess(it, r) + if it.err == nil { + t.Error("wanted error, got success") + 
} + }, + assertRetryFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertSuccessWithRetry(it, r) + if it.err == nil { + t.Error("wanted error, got success") + } + }, + }, + { + label: "response error", + serverFunc: func(t *testing.T) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Not Available", http.StatusServiceUnavailable) + })) + }, + requestFunc: func(t *testing.T, s *httptest.Server) *http.Request { + r, err := http.NewRequest(http.MethodGet, s.URL, nil) + if err != nil { + t.Fatal(err) + } + return r + }, + assertFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertSuccess(it, r) + if it.err == nil { + t.Errorf("wanted error, got success") + } + }, + assertRetryFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertSuccessWithRetry(it, r) + if it.err == nil { + t.Error("wanted error, got success") + } + }, + }, + } + + for _, tc := range tests { + t.Run(tc.label, func(t *testing.T) { + server := tc.serverFunc(t) + defer server.Close() + t.Run("default", func(t *testing.T) { + it := &inspectableT{t, nil} + ah := utils.NewAssertHTTP(utils.WithHTTPClient(server.Client())) + tc.assertFunc(t, it, ah, tc.requestFunc(t, server)) + }) + t.Run("retry", func(t *testing.T) { + it := &inspectableT{t, nil} + ah := utils.NewAssertHTTP( + utils.WithHTTPClient(server.Client()), + utils.WithHTTPRequestRetries(3, time.Millisecond), + ) + tc.assertRetryFunc(t, it, ah, tc.requestFunc(t, server)) + }) + }) + } +} + +func TestAssertResponse(t *testing.T) { + tests := []struct { + label string + serverFunc func(t *testing.T) *httptest.Server + requestFunc func(t *testing.T, s *httptest.Server) *http.Request + assertFunc func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) + assertRetryFunc func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r 
*http.Request) + }{ + { + label: "success", + serverFunc: func(t *testing.T) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, "Hello World") + })) + }, + requestFunc: func(t *testing.T, s *httptest.Server) *http.Request { + r, err := http.NewRequest(http.MethodGet, s.URL, nil) + if err != nil { + t.Fatal(err) + } + return r + }, + assertFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertResponse(it, r, http.StatusOK) + if it.err != nil { + t.Errorf("wanted success, got %v", it.err) + } + }, + assertRetryFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertResponseWithRetry(it, r, http.StatusOK) + if it.err != nil { + t.Errorf("wanted success, got %v", it.err) + } + }, + }, + { + label: "request failure", + serverFunc: func(t *testing.T) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Not Available", http.StatusServiceUnavailable) + })) + }, + requestFunc: func(t *testing.T, s *httptest.Server) *http.Request { + r, err := http.NewRequest(http.MethodGet, "/nope", nil) + if err != nil { + t.Fatal(err) + } + return r + }, + assertFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertResponse(it, r, http.StatusServiceUnavailable) + if it.err == nil { + t.Error("got success, wanted error") + } + }, + assertRetryFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertResponseWithRetry(it, r, http.StatusServiceUnavailable) + if it.err == nil { + t.Error("got success, wanted error") + } + }, + }, + { + label: "assert HTTP 503", + serverFunc: func(t *testing.T) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Not Available", http.StatusServiceUnavailable) + })) + }, + 
requestFunc: func(t *testing.T, s *httptest.Server) *http.Request { + r, err := http.NewRequest(http.MethodGet, s.URL, nil) + if err != nil { + t.Fatal(err) + } + return r + }, + assertFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertResponse(it, r, http.StatusServiceUnavailable) + if it.err != nil { + t.Errorf("got %v, wanted success", it.err) + } + }, + assertRetryFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertResponseWithRetry(it, r, http.StatusServiceUnavailable) + if it.err != nil { + t.Errorf("got %v, wanted success", it.err) + } + }, + }, + { + label: "response error", + serverFunc: func(t *testing.T) *httptest.Server { + var n int = 0 + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + n++ + http.Error(w, fmt.Sprintf("Not Available #%d", n), http.StatusServiceUnavailable) + })) + }, + requestFunc: func(t *testing.T, s *httptest.Server) *http.Request { + r, err := http.NewRequest(http.MethodGet, s.URL, nil) + if err != nil { + t.Fatal(err) + } + return r + }, + assertFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + // The single assert is the first request to the test server. + ah.AssertResponse(it, r, http.StatusOK, "#1") + if it.err != nil && !strings.Contains(it.err.Error(), "got 503, want 200") { + t.Error(it.err.Error()) + } else if it.err == nil { + t.Error("got success, want error") + } + }, + assertRetryFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + // This function is called given the AssertHTTP object is configured for 3 retries. + // The final request count for three retries is 5: + // - the first request is not a retry and counts as #1 + // - utils.Poll retries n+1 times + // The number is included in this assertion to confirm the error + // is associated with the last retry attempt. 
+ ah.AssertResponseWithRetry(it, r, http.StatusOK, "#5") + if it.err != nil && !strings.Contains(it.err.Error(), "got 503, want 200") { + t.Error(it.err.Error()) + } else if it.err == nil { + t.Error("got success, want error") + } + }, + }, + } + + for _, tc := range tests { + t.Run(tc.label, func(t *testing.T) { + t.Run("default", func(t *testing.T) { + // Unlike other test implementations in this file, server is + // instantiated per sub-case. This ensure the specific count of + // requests matches configured retry expectations. + server := tc.serverFunc(t) + defer server.Close() + it := &inspectableT{t, nil} + ah := utils.NewAssertHTTP(utils.WithHTTPClient(server.Client())) + tc.assertFunc(t, it, ah, tc.requestFunc(t, server)) + }) + t.Run("retry", func(t *testing.T) { + server := tc.serverFunc(t) + defer server.Close() + it := &inspectableT{t, nil} + ah := utils.NewAssertHTTP( + utils.WithHTTPClient(server.Client()), + utils.WithHTTPRequestRetries(3, time.Millisecond), + ) + tc.assertRetryFunc(t, it, ah, tc.requestFunc(t, server)) + }) + }) + } +} + +func TestAssertResponse_contains(t *testing.T) { + tests := []struct { + label string + assertFunc func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) + }{ + { + label: "success", + assertFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertResponse(it, r, http.StatusOK, "Hello", "World") + if it.err != nil { + t.Errorf("wanted success, got %v", it.err) + } + }, + }, + { + label: "error", + assertFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertResponse(it, r, http.StatusOK, "Hello", "Moon") + if it.err == nil { + t.Error("wanted error, got success") + } + }, + }, + { + label: "multi error", + assertFunc: func(t *testing.T, it *inspectableT, ah *utils.AssertHTTP, r *http.Request) { + ah.AssertResponse(it, r, http.StatusOK, "Hello", "Moon", "Dwellers") + if it.err == nil { + t.Error("wanted error, got 
success")
+				return
+			}
+			if e := it.err.Error(); !strings.Contains(e, "Moon") || !strings.Contains(e, "Dwellers") {
+				t.Errorf("wanted multiple errors, got one: %v", it.err)
+			}
+		},
+	},
+}
+
+server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	fmt.Fprint(w, "Hello World")
+}))
+defer server.Close()
+
+r, err := http.NewRequest(http.MethodGet, server.URL, nil)
+if err != nil {
+	t.Fatal(err)
+}
+
+for _, tc := range tests {
+	t.Run(tc.label, func(t *testing.T) {
+		it := &inspectableT{t, nil}
+		ah := utils.NewAssertHTTP(utils.WithHTTPClient(server.Client()))
+		tc.assertFunc(t, it, ah, r)
+	})
+}
+}
diff --git a/infra/blueprint-test/pkg/utils/env.go b/infra/blueprint-test/pkg/utils/env.go
index ff2358b9fdc..7aebfcd2c0b 100644
--- a/infra/blueprint-test/pkg/utils/env.go
+++ b/infra/blueprint-test/pkg/utils/env.go
@@ -31,3 +31,20 @@ func ValFromEnv(t testing.TB, k string) string {
 	}
 	return v
 }
+
+// SetEnv sets an environment variable, failing the test on error.
+func SetEnv(t testing.TB, key string, value string) {
+	err := os.Setenv(key, value)
+	if err != nil {
+		t.Fatalf("Unable to put environment variable %s: %v", key, err)
+	}
+}
+
+// GetWD returns the current working directory, failing the test on error.
+func GetWD(t testing.TB) string {
+	cwd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("unable to get wd :%v", err)
+	}
+	return cwd
+}
diff --git a/infra/blueprint-test/pkg/utils/files.go b/infra/blueprint-test/pkg/utils/files.go
new file mode 100644
index 00000000000..8a549ef33c3
--- /dev/null
+++ b/infra/blueprint-test/pkg/utils/files.go
@@ -0,0 +1,31 @@
+package utils
+
+import "os"
+
+// WriteTmpFile writes data to a temp file and returns the path.
+func WriteTmpFile(data string) (string, error) { + f, err := os.CreateTemp("", "*") + if err != nil { + return "", err + } + defer f.Close() + _, err = f.Write([]byte(data)) + if err != nil { + return "", err + } + return f.Name(), nil +} + +// WriteTmpFileWithExtension writes data to a temp file with given extension and returns the path. +func WriteTmpFileWithExtension(data string, extension string) (string, error) { + f, err := os.CreateTemp("", "*."+extension) + if err != nil { + return "", err + } + defer f.Close() + _, err = f.Write([]byte(data)) + if err != nil { + return "", err + } + return f.Name(), nil +} diff --git a/infra/blueprint-test/pkg/utils/jsonpaths.go b/infra/blueprint-test/pkg/utils/jsonpaths.go new file mode 100644 index 00000000000..46893243c48 --- /dev/null +++ b/infra/blueprint-test/pkg/utils/jsonpaths.go @@ -0,0 +1,95 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "slices" + "strconv" + "strings" + + "github.com/tidwall/gjson" +) + +// GetJSONPaths returns a []string of all possible JSON paths for a gjson.Result + func GetJSONPaths(result gjson.Result) []string { + return getJSONPaths(result.Value(), []string{}) + } + + func getJSONPaths(item interface{}, crumbs []string) []string { + var paths []string + + switch val := item.(type) { + case []interface{}: + for i, v := range val { + // Add this item to paths + paths = append(paths, strings.Join(append(crumbs, strconv.Itoa(i)), ".")) + // Search child items + paths = append(paths, + getJSONPaths(v, append(crumbs, strconv.Itoa(i)))..., + ) + } + case map[string]interface{}: + for k, v := range val { + // Add this item to paths + paths = append(paths, strings.Join(append(crumbs, k), ".")) + // Search child items + paths = append(paths, + getJSONPaths(v, append(crumbs, k))..., + ) + + } + } + + slices.Sort(paths) + return paths + } + +// GetTerminalJSONPaths returns a []string of all terminal JSON paths for a gjson.Result +func GetTerminalJSONPaths(result gjson.Result) []string { + return getTerminalJSONPaths(result.Value(), []string{}) +} + +func getTerminalJSONPaths(item interface{}, crumbs []string) []string { + var paths []string + + // Only add paths for JSON bool, number, string, and null + switch val := item.(type) { + case bool: + return []string{strings.Join(crumbs, ".")} + case float64: + return []string{strings.Join(crumbs, ".")} + case string: + return []string{strings.Join(crumbs, ".")} + case nil: + return []string{strings.Join(crumbs, ".")} + case []interface{}: + for i, v := range val { + paths = append(paths, + getTerminalJSONPaths(v, append(crumbs, strconv.Itoa(i)))..., + ) + } + case map[string]interface{}: + for k, v := range val { + paths = append(paths, + getTerminalJSONPaths(v, append(crumbs, k))..., + ) + } + } + + slices.Sort(paths) + return paths + } diff --git a/infra/blueprint-test/pkg/utils/jsonpaths_test.go 
b/infra/blueprint-test/pkg/utils/jsonpaths_test.go new file mode 100644 index 00000000000..4d14f1d3e40 --- /dev/null +++ b/infra/blueprint-test/pkg/utils/jsonpaths_test.go @@ -0,0 +1,92 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tidwall/gjson" +) + + func TestGetJSONPaths(t *testing.T) { + tests := []struct { + name string + json gjson.Result + paths []string + }{ + { + name: "one", + json: gjson.Parse(`{ + "apiVersion": "v1", + "autopilot": {}, + "locations": [ + "europe-west4-b" + ], + "metadata": { + "annotations": [ + {"my-annotation": "test"} + ] + }, + "bool": true, + "number": 3, + "null": null, + }`), + paths: []string{"apiVersion", "autopilot", "bool", "locations", "locations.0", "metadata", "metadata.annotations", "metadata.annotations.0", "metadata.annotations.0.my-annotation", "null", "number"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + assert.Equal(tt.paths, GetJSONPaths(tt.json)) + }) + } + } + + func TestTerminalGetJSONPaths(t *testing.T) { + tests := []struct { + name string + json gjson.Result + paths []string + }{ + { + name: "one", + json: gjson.Parse(`{ + "apiVersion": "v1", + "autopilot": {}, + "locations": [ + "europe-west4-b" + ], + "metadata": { + "annotations": [ + {"my-annotation": "test"} + ] + }, + "bool": true, + "number": 3, 
+ "null": null, + }`), + paths: []string{"apiVersion", "bool", "locations.0", "metadata.annotations.0.my-annotation", "null", "number"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + assert.Equal(tt.paths, GetTerminalJSONPaths(tt.json)) + }) + } +} diff --git a/infra/blueprint-test/pkg/utils/jsonresult.go b/infra/blueprint-test/pkg/utils/jsonresult.go index 82d9cfee05a..7b9636dc491 100644 --- a/infra/blueprint-test/pkg/utils/jsonresult.go +++ b/infra/blueprint-test/pkg/utils/jsonresult.go @@ -17,7 +17,8 @@ package utils import ( - "io/ioutil" + "os" + "regexp" "github.com/mitchellh/go-testing-interface" "github.com/tidwall/gjson" @@ -26,7 +27,7 @@ import ( // LoadJSON reads and parses a json file into a gjson.Result. // It fails test if not unable to parse. func LoadJSON(t testing.TB, path string) gjson.Result { - j, err := ioutil.ReadFile(path) + j, err := os.ReadFile(path) if err != nil { t.Fatalf("Error reading json file %s", path) } @@ -43,3 +44,18 @@ func ParseJSONResult(t testing.TB, j string) gjson.Result { } return gjson.Parse(j) } + +// Kubectl transient errors +var ( + KubectlTransientErrors = []string{ + "E022[23] .* the server is currently unable to handle the request", + } +) + +// Filter transient errors from kubectl output +func ParseKubectlJSONResult(t testing.TB, str string) gjson.Result { + for _, error := range KubectlTransientErrors { + str = regexp.MustCompile(error).ReplaceAllString(str, "") + } + return ParseJSONResult(t, str) +} diff --git a/infra/blueprint-test/pkg/utils/jsonresult_test.go b/infra/blueprint-test/pkg/utils/jsonresult_test.go new file mode 100644 index 00000000000..fa041a9e6a7 --- /dev/null +++ b/infra/blueprint-test/pkg/utils/jsonresult_test.go @@ -0,0 +1,60 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestKubectlJSONResult(t *testing.T) { + tests := []struct { + name string + input string + output map[string]interface{} + }{ + { + name: "one error", + input: `E0223 error the server is currently unable to handle the request + { + "apiVersion": "v1" + }`, + output: map[string]interface{}{ + "apiVersion": "v1", + }, + }, + { + name: "two error", + input: `E0223 error the server is currently unable to handle the request + E0222 some other error so the server is currently unable to handle the request + { + "apiVersion": "v1" + }`, + output: map[string]interface{}{ + "apiVersion": "v1", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + funcOut := ParseKubectlJSONResult(t, tt.input) + assert.Equal(tt.output, funcOut.Value().(map[string]interface{})) + }) + } +} diff --git a/infra/blueprint-test/pkg/utils/logger.go b/infra/blueprint-test/pkg/utils/logger.go index c6bfd03ee5f..dfd8655b458 100644 --- a/infra/blueprint-test/pkg/utils/logger.go +++ b/infra/blueprint-test/pkg/utils/logger.go @@ -17,9 +17,13 @@ package utils import ( + "fmt" + "io" + "os" "testing" "github.com/gruntwork-io/terratest/modules/logger" + terraTesting "github.com/gruntwork-io/terratest/modules/testing" ) // GetLoggerFromT returns a logger based on test verbosity @@ -29,3 +33,33 @@ func GetLoggerFromT() *logger.Logger { } return logger.Discard } + +// TestFileLogger is a logger that writes to disk instead of stdout. 
+// This is useful when you want to redirect verbose logs of long running tests to disk. +type TestFileLogger struct { + pth string + w io.WriteCloser +} + +// NewTestFileLogger returns a TestFileLogger logger that can be used with the WithLogger option. +func NewTestFileLogger(t *testing.T, pth string) (*logger.Logger, func(t *testing.T)) { + f, err := os.OpenFile(pth, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Fatalf("error opening file %s: %v", pth, err) + } + fl := TestFileLogger{ + pth: pth, + w: f, + } + return logger.New(fl), fl.Close +} + +func (fl TestFileLogger) Logf(t terraTesting.TestingT, format string, args ...interface{}) { + logger.DoLog(t, 3, fl.w, fmt.Sprintf(format, args...)) +} + +func (fl TestFileLogger) Close(t *testing.T) { + if err := fl.w.Close(); err != nil { + t.Fatalf("error closing file logger %s: %v", fl.pth, err) + } +} diff --git a/infra/blueprint-test/pkg/utils/logger_test.go b/infra/blueprint-test/pkg/utils/logger_test.go new file mode 100644 index 00000000000..151d27c8996 --- /dev/null +++ b/infra/blueprint-test/pkg/utils/logger_test.go @@ -0,0 +1,53 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "fmt" + "os" + "path" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewTestFileLogger(t *testing.T) { + tests := []struct { + name string + content string + }{ + { + name: "simple", + content: "foo", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + testFile := path.Join(t.TempDir(), fmt.Sprintf("%s.log", tt.name)) + fl, flClose := NewTestFileLogger(t, testFile) + fl.Logf(t, tt.content) + flClose(t) + gotContent, err := os.ReadFile(testFile) + assert.NoError(err) + assert.Contains(string(gotContent), "foo") + // assert we are wrapping logger.DoLog which prints stack/test info + assert.Contains(string(gotContent), fmt.Sprintf("TestNewTestFileLogger/%s", tt.name)) + assert.Contains(string(gotContent), "logger_test.go") + }) + } +} diff --git a/infra/blueprint-test/pkg/utils/poll.go b/infra/blueprint-test/pkg/utils/poll.go index c69a69018b4..3461ce0e1e8 100644 --- a/infra/blueprint-test/pkg/utils/poll.go +++ b/infra/blueprint-test/pkg/utils/poll.go @@ -17,6 +17,7 @@ package utils import ( + "fmt" "time" "github.com/mitchellh/go-testing-interface" @@ -25,12 +26,21 @@ import ( // Polls on a particular condition function while the returns true. // It fails the test if the condition is not met within numRetries. func Poll(t testing.TB, condition func() (bool, error), numRetries int, interval time.Duration) { + err := PollE(t, condition, numRetries, interval) + if err != nil { + t.Fatalf("failed to pull provided condition after %d retries, last error: %v", numRetries, err) + } +} + +// Polls on a particular condition function while the returns true. +// Returns an error if the condition is not met within numRetries. +func PollE(t testing.TB, condition func() (bool, error), numRetries int, interval time.Duration) error { if numRetries < 0 { - t.Fatal("invalid value for numRetries. Must be >= 0") + return &PollParameterError{"invalid value for numRetries. 
Must be >= 0"} } if interval <= 0 { - t.Fatal("invalid value for numRetries. Must be > 0") + return &PollParameterError{"invalid value for interval. Must be > 0"} } retry, err := condition() @@ -45,10 +55,45 @@ func Poll(t testing.TB, condition func() (bool, error), numRetries int, interval } if err != nil { - t.Fatalf("failed to pull provided condition after %d retries, last error: %v", numRetries, err) + return &PollConditionError{err: err, numRetries: numRetries} } if retry { - t.Fatalf("polling timed out after %d retries with %d second intervals", numRetries, interval/time.Second) + return &PollRetryLimitExceededError{interval: interval, numRetries: numRetries} } + + return nil +} + +// PollParameterError is returend by PollE when input parameters are invalid. +type PollParameterError struct { + msg string +} + +func (e *PollParameterError) Error() string { + return e.msg +} + +// PollRetryLimitExceededError is returned by PollE when retries exceed numRetries. +type PollRetryLimitExceededError struct { + numRetries int + interval time.Duration +} + +func (e *PollRetryLimitExceededError) Error() string { + return fmt.Sprintf("polling timed out after %d retries with %.2f second intervals", e.numRetries, e.interval.Seconds()) +} + +// PollConditionError is an error returned on the final PollE attempt. 
+type PollConditionError struct { + err error + numRetries int +} + +func (e *PollConditionError) Error() string { + return fmt.Sprintf("failed to pull provided condition after %d retries, last error: %v", e.numRetries, e.err) +} + +func (e *PollConditionError) Unwrap() error { + return e.err } diff --git a/infra/blueprint-test/pkg/utils/poll_test.go b/infra/blueprint-test/pkg/utils/poll_test.go new file mode 100644 index 00000000000..c76dbc2dd01 --- /dev/null +++ b/infra/blueprint-test/pkg/utils/poll_test.go @@ -0,0 +1,92 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils_test + +import ( + "errors" + "strings" + "testing" + "time" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" +) + +func TestPoll(t *testing.T) { + testcases := []struct { + label string + condition func() (bool, error) + want string + }{ + { + label: "error", + condition: func() (bool, error) { + return true, errors.New("condition failure") + }, + want: "condition failure", + }, + { + label: "timeout", + condition: func() (bool, error) { + return true, nil + }, + want: "polling timed out", + }, + } + + for _, tc := range testcases { + t.Run(tc.label, func(t *testing.T) { + it := &inspectableT{t, nil} + utils.Poll(it, tc.condition, 3, time.Millisecond) + if !strings.Contains(it.err.Error(), tc.want) { + t.Errorf("got %v, want %v", it.err, tc.want) + } + }) + } +} + +func TestPollE(t *testing.T) { + testcases := []struct { + label string + condition func() (bool, error) + want string + }{ + { + label: "error", + condition: func() (bool, error) { + return true, errors.New("condition failure") + }, + want: "condition failure", + }, + { + label: "timeout", + condition: func() (bool, error) { + return true, nil + }, + want: "polling timed out", + }, + } + + for _, tc := range testcases { + t.Run(tc.label, func(t *testing.T) { + it := &inspectableT{t, nil} + err := utils.PollE(it, tc.condition, 3, time.Millisecond) + if !strings.Contains(err.Error(), tc.want) { + t.Errorf("got %v, want %v", it.err, tc.want) + } + }) + } +} diff --git a/infra/blueprint-test/pkg/utils/rand.go b/infra/blueprint-test/pkg/utils/rand.go index 8743ce2b6f6..322b3679ae5 100644 --- a/infra/blueprint-test/pkg/utils/rand.go +++ b/infra/blueprint-test/pkg/utils/rand.go @@ -24,7 +24,7 @@ import ( // RandStr generates rand lowercase strings of length l func RandStr(l int) string { charSet := "abcdefghijklmnopqrstuvwxyz" - rand.Seed(time.Now().UnixNano()) + rand.New(rand.NewSource(time.Now().UnixNano())) lenCharSet := len(charSet) bytes 
:= make([]byte, l) for i := range bytes { diff --git a/infra/blueprint-test/pkg/utils/string_formatter.go b/infra/blueprint-test/pkg/utils/string_formatter.go new file mode 100644 index 00000000000..5a2f7449ddb --- /dev/null +++ b/infra/blueprint-test/pkg/utils/string_formatter.go @@ -0,0 +1,37 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import "fmt" + +// StringFromTextAndArgs converts msg and args to formatted text +func StringFromTextAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + msg := msgAndArgs[0] + if msgAsStr, ok := msg.(string); ok { + return msgAsStr + } + return fmt.Sprintf("%+v", msg) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} diff --git a/infra/blueprint-test/pkg/utils/string_formatter_test.go b/infra/blueprint-test/pkg/utils/string_formatter_test.go new file mode 100644 index 00000000000..6ab77b35a7e --- /dev/null +++ b/infra/blueprint-test/pkg/utils/string_formatter_test.go @@ -0,0 +1,46 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStringFromTextAndArgs(t *testing.T) { + tests := []struct { + name string + cmd string + args []interface{} + output string + }{ + { + name: "one arg", + cmd: "project list --filter=%s", + args: []interface{}{"TEST_PROJECT"}, + output: "project list --filter=TEST_PROJECT", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + funcOut := StringFromTextAndArgs(append([]interface{}{tt.cmd}, tt.args...)...) + assert.Equal(tt.output, funcOut) + }) + } +} diff --git a/infra/blueprint-test/pkg/utils/transforms.go b/infra/blueprint-test/pkg/utils/transforms.go index 786909a429a..1adcc77f9df 100644 --- a/infra/blueprint-test/pkg/utils/transforms.go +++ b/infra/blueprint-test/pkg/utils/transforms.go @@ -19,6 +19,7 @@ package utils import ( "github.com/mitchellh/go-testing-interface" "github.com/tidwall/gjson" + "github.com/tidwall/sjson" ) // GetFirstMatchResult returns the first matching result with a given k/v @@ -40,3 +41,12 @@ func GetResultStrSlice(rs []gjson.Result) []string { } return s } + +// DeleteFromResult deletes given path from result and returns the modified result +func DeleteFromResult(t testing.TB, pth string, r gjson.Result) gjson.Result { + n, err := sjson.Delete(r.String(), pth) + if err != nil { + t.Fatalf("unable to delete path %s in %s", pth, r.String()) + } + return ParseJSONResult(t, n) +} diff --git a/infra/blueprint-test/pkg/utils/utils_test.go 
b/infra/blueprint-test/pkg/utils/utils_test.go new file mode 100644 index 00000000000..6ccb7531af0 --- /dev/null +++ b/infra/blueprint-test/pkg/utils/utils_test.go @@ -0,0 +1,52 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils_test + +import ( + "errors" + "fmt" + "testing" +) + +// inspectableT wraps testing.T, overriding testing behavior to make error cases retrievable. +type inspectableT struct { + *testing.T + err error +} + +func (it *inspectableT) Error(args ...interface{}) { + it.addError(args...) +} + +func (it *inspectableT) Errorf(format string, args ...interface{}) { + a := append([]interface{}{format}, args) + it.addError(a) +} + +func (it *inspectableT) Fatal(args ...interface{}) { + it.addError(args...) +} + +func (it *inspectableT) Fatalf(format string, args ...interface{}) { + a := append([]interface{}{format}, args) + it.addError(a) +} + +func (it *inspectableT) addError(args ...interface{}) { + s := fmt.Sprint(args...) + it.err = errors.Join(it.err, errors.New(s)) +} diff --git a/infra/blueprint-test/pkg/utils/version.go b/infra/blueprint-test/pkg/utils/version.go new file mode 100644 index 00000000000..481a1630aa7 --- /dev/null +++ b/infra/blueprint-test/pkg/utils/version.go @@ -0,0 +1,37 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "fmt" + + "golang.org/x/mod/semver" +) + +// MinSemver validates gotSemver is not less than minSemver +func MinSemver(gotSemver string, minSemver string) error { + if !semver.IsValid(gotSemver) { + return fmt.Errorf("unable to parse got version %q", gotSemver) + } else if !semver.IsValid(minSemver) { + return fmt.Errorf("unable to parse minimum version %q", minSemver) + } + if semver.Compare(gotSemver, minSemver) == -1 { + return fmt.Errorf("got version %q is less than minimum version %q", gotSemver, minSemver) + } + + return nil +} diff --git a/infra/blueprint-test/test/README.md b/infra/blueprint-test/test/README.md new file mode 100644 index 00000000000..5ce946d32ec --- /dev/null +++ b/infra/blueprint-test/test/README.md @@ -0,0 +1,28 @@ +# Run blueprint-tests + +Set environment variables: + +```bash +export TF_VAR_org_id="your_org_id" +export TF_VAR_folder_id="your_folder_id" +export TF_VAR_billing_account="your_billing_account_id" +``` + +Create test project: + +```bash +terraform -chdir=setup init +terraform -chdir=setup apply +``` + +Run tests: + +```bash +go test [-v] +``` + +Cleanup test project: + +```bash +terraform -chdir=setup destroy +``` diff --git a/infra/blueprint-test/test/cai_test.go b/infra/blueprint-test/test/cai_test.go new file mode 100644 index 00000000000..54c6de41ea4 --- /dev/null +++ b/infra/blueprint-test/test/cai_test.go @@ -0,0 +1,70 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package test + +import ( + "encoding/base64" + "fmt" + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/cai" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/stretchr/testify/assert" +) + +func TestGetProjectResources(t *testing.T) { + tests := []struct { + name string + assetTypes []string + wantKeyPath string + wantVal string + }{ + {name: "all", assetTypes: nil, wantKeyPath: "resource.data.nodeConfig.imageType", wantVal: "COS_CONTAINERD"}, + {name: "cluster", assetTypes: []string{"container.googleapis.com/Cluster", "compute.googleapis.com/Project"}, wantKeyPath: "resource.data.nodeConfig.imageType", wantVal: "COS_CONTAINERD"}, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + + tfBlueprint := tft.NewTFBlueprintTest(t, + tft.WithTFDir("setup"), + ) + + clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", + tfBlueprint.GetStringOutput("project_id"), + tfBlueprint.GetStringOutput("cluster_region"), + tfBlueprint.GetStringOutput("cluster_name"), + ) + + projectResourceName := fmt.Sprintf("//compute.googleapis.com/projects/%s", + tfBlueprint.GetStringOutput("project_id"), + ) + + // Use the test SA for cai call + credDec, _ := base64.StdEncoding.DecodeString(tfBlueprint.GetStringOutput("sa_key")) + 
gcloud.ActivateCredsAndEnvVars(t, string(credDec)) + + cai := cai.GetProjectResources(t, tfBlueprint.GetStringOutput("project_id"), cai.WithAssetTypes(tt.assetTypes)) + assert.Equal(tfBlueprint.GetStringOutput("project_id"), cai.Get("#(name=\"" + projectResourceName + "\").resource.data.name").String(), "project_id exists in cai") + assert.Equal(tt.wantVal, cai.Get("#(name=\"" + clusterResourceName + "\")." + tt.wantKeyPath).String(), "correct cluster image type") + }) + } +} diff --git a/infra/blueprint-test/test/krm_simple_blueprint_test.go b/infra/blueprint-test/test/krm_simple_blueprint_test.go index ad855f552a3..38da3aa0dda 100644 --- a/infra/blueprint-test/test/krm_simple_blueprint_test.go +++ b/infra/blueprint-test/test/krm_simple_blueprint_test.go @@ -1,26 +1,34 @@ package test import ( - "fmt" + "strings" "testing" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/krmt" - "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/gruntwork-io/terratest/modules/k8s" "github.com/stretchr/testify/assert" ) func TestKRMSimpleBlueprint(t *testing.T) { + tfBlueprint := tft.NewTFBlueprintTest(t, + tft.WithTFDir("setup"), + ) + gcloud.Runf(t, "container clusters get-credentials %s --region=%s --project %s -q", tfBlueprint.GetStringOutput("cluster_name"), tfBlueprint.GetStringOutput("cluster_region"), tfBlueprint.GetStringOutput("project_id")) + networkBlueprint := krmt.NewKRMBlueprintTest(t, krmt.WithDir("../examples/simple_krm_blueprint"), - krmt.WithUpdateCommit("2b93fd6d4f1a3eabdf4dfce05b93ccb1f9f671c5"), + krmt.WithUpdatePkgs(false), ) networkBlueprint.DefineVerify( func(assert *assert.Assertions) { networkBlueprint.DefaultVerify(assert) - op := gcloud.Run(t, fmt.Sprintf("compute networks describe 
custom-network --project %s", utils.ValFromEnv(t, "PROJECT_ID"))) - assert.Equal("GLOBAL", op.Get("routingConfig.routingMode").String(), "should be GLOBAL") - assert.Equal("false", op.Get("autoCreateSubnetworks").String(), "autoCreateSubnetworks should not be enabled") + k8sOpts := k8s.KubectlOptions{} + op, err := k8s.RunKubectlAndGetOutputE(t, &k8sOpts, "get", "pod", "simple-krm-blueprint", "--no-headers", "-o", "custom-columns=:metadata.name") + assert.NoError(err) + result := strings.Split(op, "\n") + assert.Equal("simple-krm-blueprint", result[len(result)-1]) }) networkBlueprint.Test() } diff --git a/infra/blueprint-test/test/retry_errors_test.go b/infra/blueprint-test/test/retry_errors_test.go new file mode 100644 index 00000000000..5e695e39b4c --- /dev/null +++ b/infra/blueprint-test/test/retry_errors_test.go @@ -0,0 +1,49 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package test + +import ( + "testing" + "time" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/gruntwork-io/terratest/modules/terraform" + "github.com/stretchr/testify/assert" +) + +func TestRetryErrors(t *testing.T) { + bpt := tft.NewTFBlueprintTest(t, + tft.WithTFDir("../examples/retry_errors"), + tft.WithSetupPath("./setup"), + tft.WithRetryableTerraformErrors(tft.CommonRetryableErrors, 2, 3*time.Second), + ) + bpt.DefineVerify(func(assert *assert.Assertions) {}) + bpt.DefineTeardown(func(assert *assert.Assertions) {}) + + // The default apply is `terraform.Apply(b.t, b.GetTFOptions())` which has a `require.NoError(t, err)` + // calling `terraform.ApplyE(t, bpt.GetTFOptions())` we are able to process the error end check if it has the + // "unsuccessful after X retries" message. this works for the this test because the code to sent the retry options + // to terraform is in the `bpt.GetTFOptions()` function. + bpt.DefineApply( + func(assert *assert.Assertions) { + out, err := terraform.ApplyE(t, bpt.GetTFOptions()) + assert.Contains(out, "SERVICE_DISABLED") + errMsg := err.Error() + assert.Equal(errMsg, "'terraform [apply -input=false -auto-approve -no-color -lock=false]' unsuccessful after 2 retries") + }) + bpt.Test() +} diff --git a/infra/blueprint-test/test/setup/main.tf b/infra/blueprint-test/test/setup/main.tf new file mode 100644 index 00000000000..27dcdff20e7 --- /dev/null +++ b/infra/blueprint-test/test/setup/main.tf @@ -0,0 +1,75 @@ +/** + * Copyright 2021-2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + int_required_roles = [ + "roles/compute.networkAdmin", + "roles/compute.securityAdmin", + "roles/iam.serviceAccountUser", + "roles/vpcaccess.admin", + "roles/serviceusage.serviceUsageAdmin", + "roles/container.admin", + "roles/cloudasset.viewer", + "roles/serviceusage.serviceUsageConsumer" + ] +} + +module "project" { + source = "terraform-google-modules/project-factory/google" + version = "~> 17.0" + + name = "ci-bptest" + random_project_id = "true" + org_id = var.org_id + folder_id = var.folder_id + billing_account = var.billing_account + + default_service_account = "DEPRIVILEGE" + deletion_policy = "DELETE" + + activate_apis = [ + "cloudresourcemanager.googleapis.com", + "compute.googleapis.com", + "serviceusage.googleapis.com", + "vpcaccess.googleapis.com", + "container.googleapis.com", + "cloudasset.googleapis.com" + ] +} + +resource "google_service_account" "sa" { + project = module.project.project_id + account_id = "ci-account" + display_name = "ci-account" +} + +resource "google_project_iam_member" "roles" { + for_each = toset(local.int_required_roles) + + project = module.project.project_id + role = each.value + member = "serviceAccount:${google_service_account.sa.email}" +} + +resource "google_service_account_key" "key" { + service_account_id = google_service_account.sa.id +} + +module "kubernetes-engine_example_simple_autopilot_public" { + source = "terraform-google-modules/kubernetes-engine/google//examples/simple_autopilot_public" + version = "~> 34.0" + project_id = module.project.project_id +} diff --git 
a/infra/blueprint-test/test/setup/outputs.tf b/infra/blueprint-test/test/setup/outputs.tf new file mode 100644 index 00000000000..00e71caeecf --- /dev/null +++ b/infra/blueprint-test/test/setup/outputs.tf @@ -0,0 +1,32 @@ +/** + * Copyright 2021-2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "project_id" { + value = module.project.project_id +} + +output "sa_key" { + value = google_service_account_key.key.private_key + sensitive = true +} + +output "cluster_name" { + value = module.kubernetes-engine_example_simple_autopilot_public.cluster_name +} + +output "cluster_region" { + value = module.kubernetes-engine_example_simple_autopilot_public.region +} diff --git a/infra/blueprint-test/test/setup/simple_tf_module/main.tf b/infra/blueprint-test/test/setup/simple_tf_module/main.tf deleted file mode 100644 index 1acbc32327a..00000000000 --- a/infra/blueprint-test/test/setup/simple_tf_module/main.tf +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -module "project" { - source = "terraform-google-modules/project-factory/google" - version = "~> 11.0" - - name = "ci-bptest" - random_project_id = "true" - org_id = var.org_id - folder_id = var.folder_id - billing_account = var.billing_account - - activate_apis = [ - "cloudresourcemanager.googleapis.com", - "compute.googleapis.com", - "serviceusage.googleapis.com", - "vpcaccess.googleapis.com" - ] -} diff --git a/infra/blueprint-test/test/setup/simple_tf_module/outputs.tf b/infra/blueprint-test/test/setup/simple_tf_module/outputs.tf deleted file mode 100644 index e6bc3968cc1..00000000000 --- a/infra/blueprint-test/test/setup/simple_tf_module/outputs.tf +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -output "project_id" { - value = module.project.project_id -} diff --git a/infra/blueprint-test/test/setup/simple_tf_module/variables.tf b/infra/blueprint-test/test/setup/variables.tf similarity index 100% rename from infra/blueprint-test/test/setup/simple_tf_module/variables.tf rename to infra/blueprint-test/test/setup/variables.tf diff --git a/infra/blueprint-test/test/terraform_redeploy_test.go b/infra/blueprint-test/test/terraform_redeploy_test.go new file mode 100644 index 00000000000..39530fdd299 --- /dev/null +++ b/infra/blueprint-test/test/terraform_redeploy_test.go @@ -0,0 +1,44 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package test + +import ( + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/gruntwork-io/terratest/modules/terraform" + "github.com/stretchr/testify/assert" +) + +func TestRedeploy(t *testing.T) { + nt := tft.NewTFBlueprintTest(t, + tft.WithTFDir("../examples/simple_pet_module"), + tft.WithSetupPath(""), + ) + nt.DefineVerify(func(a *assert.Assertions) { + if nt.GetStringOutput("current_ws") == "test-2" { + a.Equal("custom", nt.GetStringOutput("test"), "should have custom var override") + } else { + a.Equal("", nt.GetStringOutput("test"), "should have not have custom var override") + } + }) + nt.RedeployTest(3, map[int]map[string]interface{}{2: {"test": "custom"}}) + expectedWorkspaces := []string{"test-1", "test-2", "test-3"} + for _, ws := range expectedWorkspaces { + terraform.RunTerraformCommand(t, nt.GetTFOptions(), "workspace", "select", ws) + } +} diff --git a/infra/blueprint-test/test/terraform_simple_base_api_module_test.go b/infra/blueprint-test/test/terraform_simple_base_api_module_test.go index 85477c58882..0f3c9caa0f7 100644 --- a/infra/blueprint-test/test/terraform_simple_base_api_module_test.go +++ b/infra/blueprint-test/test/terraform_simple_base_api_module_test.go @@ -18,24 +18,69 @@ package test import ( "fmt" + "io" + "os" + "strings" "testing" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" + "github.com/gruntwork-io/terratest/modules/logger" + tt "github.com/gruntwork-io/terratest/modules/testing" "github.com/stretchr/testify/assert" ) +type customLogger struct { + w io.Writer +} + +func (c *customLogger) Logf(t tt.TestingT, format string, args ...interface{}) { + _, err := fmt.Fprintf(c.w, format, args...) 
+ if err != nil { + t.Fatal(err) + } + _, err = fmt.Fprintln(c.w) + if err != nil { + t.Fatal(err) + } +} + func TestSimpleTFModule(t *testing.T) { + path, _ := os.Getwd() + + // Regular logger that also writes to stdout. + var regularLogs strings.Builder + regularWriter := io.MultiWriter(®ularLogs, os.Stdout) + fakeRegularWriter := &customLogger{ + w: regularWriter, + } + regularLogger := logger.New(fakeRegularWriter) + + // Sensitive logger to capture sensitive output. + var sensitiveLogs strings.Builder + fakeSensitiveWriter := + &customLogger{ + w: &sensitiveLogs, + } + + sensitiveLogger := logger.New(fakeSensitiveWriter) + statePath := fmt.Sprintf("%s/../examples/simple_tf_module/local_backend.tfstate", path) nt := tft.NewTFBlueprintTest(t, tft.WithTFDir("../examples/simple_tf_module"), - tft.WithSetupPath("setup/simple_tf_module"), + tft.WithBackendConfig(map[string]interface{}{ + "path": statePath, + }), + tft.WithSetupPath("setup"), tft.WithEnvVars(map[string]string{"network_name": fmt.Sprintf("foo-%s", utils.RandStr(5))}), + tft.WithLogger(regularLogger), + tft.WithSensitiveLogger(sensitiveLogger), ) utils.RunStage("init", func() { nt.Init(nil) }) defer utils.RunStage("teardown", func() { nt.Teardown(nil) }) + utils.RunStage("plan", func() { nt.Plan(nil) }) utils.RunStage("apply", func() { nt.Apply(nil) }) utils.RunStage("verify", func() { @@ -44,5 +89,20 @@ func TestSimpleTFModule(t *testing.T) { op := gcloud.Run(t, fmt.Sprintf("compute networks subnets describe subnet-01 --project %s --region us-west1", nt.GetStringOutput("project_id"))) assert.Equal("10.10.10.0/24", op.Get("ipCidrRange").String(), "should have the right CIDR") assert.Equal("false", op.Get("logConfig.enable").String(), "logConfig should not be enabled") + assert.FileExists(statePath) }) + + // sa_key is a sensitive output key from setup. 
+ sensitiveOP := "sa_key" + if strings.Contains(regularLogs.String(), sensitiveOP) { + t.Errorf("regular logs should not contain sensitive output") + } + if !strings.Contains(sensitiveLogs.String(), sensitiveOP) { + t.Errorf("sensitive logs should contain sensitive output") + } + + // Custom plan function not defined, plan should be skipped. + if !strings.Contains(regularLogs.String(), "skipping plan as no function defined") { + t.Errorf("plan should be skipped") + } } diff --git a/infra/blueprint-test/test/terraform_simple_bpt_test.go b/infra/blueprint-test/test/terraform_simple_bpt_test.go index eb959e77074..488243487a2 100644 --- a/infra/blueprint-test/test/terraform_simple_bpt_test.go +++ b/infra/blueprint-test/test/terraform_simple_bpt_test.go @@ -18,6 +18,7 @@ package test import ( "fmt" + "os" "testing" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" @@ -26,9 +27,14 @@ import ( ) func TestCFTSimpleModule(t *testing.T) { + path, _ := os.Getwd() + statePath := fmt.Sprintf("%s/../examples/simple_tf_module/local_backend.tfstate", path) networkBlueprint := tft.NewTFBlueprintTest(t, tft.WithTFDir("../examples/simple_tf_module"), - tft.WithSetupPath("setup/simple_tf_module"), + tft.WithBackendConfig(map[string]interface{}{ + "path": statePath, + }), + tft.WithSetupPath("setup"), ) networkBlueprint.DefineVerify( func(assert *assert.Assertions) { @@ -36,6 +42,10 @@ func TestCFTSimpleModule(t *testing.T) { op := gcloud.Run(t, fmt.Sprintf("compute networks subnets describe subnet-01 --project %s --region us-west1", networkBlueprint.GetStringOutput("project_id"))) assert.Equal("10.10.10.0/24", op.Get("ipCidrRange").String(), "should have the right CIDR") assert.Equal("false", op.Get("logConfig.enable").String(), "logConfig should not be enabled") + assert.FileExists(statePath) + + //test for GetTFSetupStringOutput + assert.Contains(networkBlueprint.GetTFSetupStringOutput("project_id"), "ci-bptest") }) networkBlueprint.Test() } 
diff --git a/infra/blueprint-test/test/terraform_simple_plan_test.go b/infra/blueprint-test/test/terraform_simple_plan_test.go new file mode 100644 index 00000000000..21f2521f900 --- /dev/null +++ b/infra/blueprint-test/test/terraform_simple_plan_test.go @@ -0,0 +1,48 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package test + +import ( + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/gruntwork-io/terratest/modules/terraform" + "github.com/stretchr/testify/assert" + + tfjson "github.com/hashicorp/terraform-json" +) + +func TestPlan(t *testing.T) { + networkBlueprint := tft.NewTFBlueprintTest(t, + tft.WithTFDir("../examples/simple_tf_module"), + tft.WithSetupPath("setup"), + ) + networkBlueprint.DefinePlan(func(ps *terraform.PlanStruct, assert *assert.Assertions) { + assert.Equal(4, len(ps.ResourceChangesMap), "expected 4 resources") + }) + networkBlueprint.DefineVerify( + func(assert *assert.Assertions) { + _, ps := networkBlueprint.PlanAndShow() + for _, r := range ps.ResourceChangesMap { + assert.Equal(tfjson.Actions{tfjson.ActionNoop}, r.Change.Actions, "must be no-op") + } + op := gcloud.Runf(t, "compute networks subnets describe subnet-01 --project %s --region us-west1", networkBlueprint.GetStringOutput("project_id")) + 
assert.Equal("10.10.10.0/24", op.Get("ipCidrRange").String(), "should have the right CIDR") + }) + networkBlueprint.Test() +} diff --git a/infra/blueprint-test/test/tfvet_test.go b/infra/blueprint-test/test/tfvet_test.go new file mode 100644 index 00000000000..141a801c388 --- /dev/null +++ b/infra/blueprint-test/test/tfvet_test.go @@ -0,0 +1,85 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package test + +import ( + "bytes" + "log" + "os" + "path" + "testing" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + testingiface "github.com/mitchellh/go-testing-interface" + "github.com/stretchr/testify/assert" +) + +func TestTerraformVet(t *testing.T) { + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("unable to get wd :%v", err) + } + libraryPath := path.Join(cwd, "../examples/policy-library") + + for _, tc := range []struct { + name string + service string + errMsgContains string + }{ + { + name: "Valid configuration", + service: "cloudresourcemanager.googleapis.com", + }, + { + name: "Configuration with violations", + service: "oslogin.googleapis.com", + errMsgContains: "GCPServiceUsageConstraintV1", + }, + } { + t.Run(tc.name, func(t *testing.T) { + fakeT := &testingiface.RuntimeT{} + var logOutput bytes.Buffer + log.SetOutput(&logOutput) + vars := map[string]interface{}{"service": tc.service} + + temp := tft.NewTFBlueprintTest(fakeT, + tft.WithVars(vars), + 
tft.WithTFDir("../examples/tf_vet"), + tft.WithSetupPath("./setup"), + ) + + bpt := tft.NewTFBlueprintTest(fakeT, + tft.WithVars(vars), + tft.WithTFDir("../examples/tf_vet"), + tft.WithSetupPath("./setup"), + tft.WithPolicyLibraryPath(libraryPath, temp.GetTFSetupStringOutput("project_id"))) + bpt.DefineVerify( + func(assert *assert.Assertions) { + bpt.DefaultVerify(assert) + }) + bpt.Test() + + if tc.errMsgContains == "" { + assert.False(t, fakeT.Failed(), "test should be sucessful") + } else { + assert.True(t, fakeT.Failed(), "test should have failed") + assert.Contains(t, logOutput.String(), tc.errMsgContains) + } + + }) + } +} diff --git a/infra/build/Makefile b/infra/build/Makefile index aac408a0fe8..a7e90bae699 100644 --- a/infra/build/Makefile +++ b/infra/build/Makefile @@ -1,4 +1,4 @@ -# Copyright 2019 Google LLC +# Copyright 2019-2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,23 +13,50 @@ # limitations under the License. 
SHELL := /usr/bin/env bash # Make will use bash instead of sh -TERRAFORM_VERSION := 1.0.6 -TERRAFORM_VALIDATOR_VERSION := 0.9.1 -CLOUD_SDK_VERSION := 356.0.0 +# Updated by Update Tooling Workflow +TERRAFORM_VERSION := 1.10.3 +# Updated by Update Tooling Workflow +CLOUD_SDK_VERSION := 504.0.1 GSUITE_PROVIDER_VERSION := 0.1.22 -TERRAFORM_DOCS_VERSION := 0.10.1 -RUBY_VERSION := 2.6.3 +TERRAFORM_DOCS_VERSION := 0.16.0 BATS_VERSION := 0.4.0 -GOLANG_VERSION := 1.16 BATS_SUPPORT_VERSION := 0.3.0 BATS_ASSERT_VERSION := 2.0.0 BATS_MOCK_VERSION := 1.0.1 PACKER_VERSION := 1.4.4 -TERRAGRUNT_VERSION := 0.25.3 -KUSTOMIZE_VERSION := 3.6.0 -KPT_VERSION := 1.0.0-beta.4 +# Updated by Update Tooling Workflow +TERRAGRUNT_VERSION := 0.71.1 +# Updated by Update Tooling Workflow +KUSTOMIZE_VERSION := 5.5.0 +# Updated by Update Tooling Workflow +KPT_VERSION := 1.0.0-beta.55 +PROTOC_VERSION := 23.4 +PROTOC_GEN_GO_VERSION := 1.31 +PROTOC_GEN_GO_GRPC_VERSION := 1.3 +PROTOC_GEN_GO_INJECT_TAG := 1.4.0 +# Updated by Update Tooling Workflow +CFT_CLI_VERSION := 1.5.12 +# Updated by Update Tooling Workflow +TFLINT_VERSION := 0.54.0 +TINKEY_VERSION := 1.7.0 +ALPINE_VERSION := 3.21 +# Updated by Update Tooling Workflow +MODULE_SWAPPER_VERSION := 0.4.11 +# Updated by Update Tooling Workflow +TFLINT_BP_PLUGIN_VERSION := 0.2.7 -DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 1.2.3 +# For developer-tools-krm +# Updated by Update Tooling Workflow +GOLANGCI_VERSION := 1.63.3 +ASMCLI_VERSION := 1.15 +KIND_VERSION := 0.18.0 +# Updated by Update Tooling Workflow +GATOR_VERSION := 3.17.1 +OPA_VERSION := 0.52.0 +# Updated by Update Tooling Workflow +GCRANE_VERSION := 0.20.2 + +DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 1.23.4 DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS := $(firstword $(subst ., , $(DOCKER_TAG_VERSION_DEVELOPER_TOOLS))) DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS := $(shell echo "${DOCKER_TAG_VERSION_DEVELOPER_TOOLS}" | awk -F. 
'{print $$1"."$$2}') @@ -38,6 +65,7 @@ DOCKER_IMAGE_DEVELOPER_KRM := cft/developer-tools-krm DOCKER_IMAGE_DEVELOPER_LIGHT := cft/developer-tools-light DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS := cft/developer-tools-jenkins REGISTRY_URL := gcr.io/cloud-foundation-cicd +DEP_TAG_PREFIX := infrastructure-public-image- .PHONY: build-image-developer-tools @@ -46,17 +74,25 @@ build-image-developer-tools: --build-arg CLOUD_SDK_VERSION=${CLOUD_SDK_VERSION} \ --build-arg GSUITE_PROVIDER_VERSION=${GSUITE_PROVIDER_VERSION} \ --build-arg TERRAFORM_VERSION=${TERRAFORM_VERSION} \ - --build-arg TERRAFORM_VALIDATOR_VERSION=${TERRAFORM_VALIDATOR_VERSION} \ --build-arg TERRAFORM_DOCS_VERSION=${TERRAFORM_DOCS_VERSION} \ - --build-arg RUBY_VERSION=${RUBY_VERSION} \ --build-arg BATS_VERSION=${BATS_VERSION} \ - --build-arg GOLANG_VERSION=${GOLANG_VERSION} \ --build-arg BATS_SUPPORT_VERSION=${BATS_SUPPORT_VERSION} \ --build-arg BATS_ASSERT_VERSION=${BATS_ASSERT_VERSION} \ --build-arg BATS_MOCK_VERSION=${BATS_MOCK_VERSION} \ --build-arg TERRAGRUNT_VERSION=${TERRAGRUNT_VERSION} \ --build-arg KUSTOMIZE_VERSION=${KUSTOMIZE_VERSION} \ --build-arg KPT_VERSION=${KPT_VERSION} \ + --build-arg PROTOC_VERSION=${PROTOC_VERSION} \ + --build-arg PROTOC_GEN_GO_VERSION=${PROTOC_GEN_GO_VERSION} \ + --build-arg PROTOC_GEN_GO_GRPC_VERSION=${PROTOC_GEN_GO_GRPC_VERSION} \ + --build-arg PROTOC_GEN_GO_INJECT_TAG=${PROTOC_GEN_GO_INJECT_TAG} \ + --build-arg CFT_CLI_VERSION=${CFT_CLI_VERSION} \ + --build-arg KUBECTL_VERSION=${KUBECTL_VERSION} \ + --build-arg TFLINT_VERSION=${TFLINT_VERSION} \ + --build-arg TINKEY_VERSION=${TINKEY_VERSION} \ + --build-arg ALPINE_VERSION=${ALPINE_VERSION} \ + --build-arg MODULE_SWAPPER_VERSION=${MODULE_SWAPPER_VERSION} \ + --build-arg TFLINT_BP_PLUGIN_VERSION=${TFLINT_BP_PLUGIN_VERSION} \ -t ${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} "./developer-tools" docker build \ --build-arg BASE_IMAGE_VERSION=${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ @@ -65,61 +101,150 @@ 
build-image-developer-tools: docker build . \ --build-arg TERRAFORM_VERSION=${TERRAFORM_VERSION} \ --build-arg CLOUD_SDK_VERSION=${CLOUD_SDK_VERSION} \ + --build-arg ALPINE_VERSION=${ALPINE_VERSION} \ -t ${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} -f "./developer-tools-light/Dockerfile" docker build \ + --build-arg GOLANGCI_VERSION=${GOLANGCI_VERSION} \ + --build-arg ASMCLI_VERSION=${ASMCLI_VERSION} \ + --build-arg GATOR_VERSION=${GATOR_VERSION} \ + --build-arg OPA_VERSION=${OPA_VERSION} \ + --build-arg KIND_VERSION=${KIND_VERSION} \ + --build-arg GCRANE_VERSION=${GCRANE_VERSION} \ --build-arg BASE_IMAGE_VERSION=${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ -t ${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} "./developer-tools-krm" .PHONY: release-image-developer-tools release-image-developer-tools: + test -n "$(GITHUB_SHA)" # $$GITHUB_SHA + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DEP_TAG_PREFIX}${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DEP_TAG_PREFIX}${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DEP_TAG_PREFIX}${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker push 
${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DEP_TAG_PREFIX}${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DEP_TAG_PREFIX}${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DEP_TAG_PREFIX}${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${GITHUB_SHA} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${GITHUB_SHA} + + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DEP_TAG_PREFIX}${GITHUB_SHA} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DEP_TAG_PREFIX}${GITHUB_SHA} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DEP_TAG_PREFIX}${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DEP_TAG_PREFIX}${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ 
${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DEP_TAG_PREFIX}${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DEP_TAG_PREFIX}${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DEP_TAG_PREFIX}${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DEP_TAG_PREFIX}${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${GITHUB_SHA} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${GITHUB_SHA} + + docker tag ${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DEP_TAG_PREFIX}${GITHUB_SHA} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS}:${DEP_TAG_PREFIX}${GITHUB_SHA} + docker tag ${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} docker push 
${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DEP_TAG_PREFIX}${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DEP_TAG_PREFIX}${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DEP_TAG_PREFIX}${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DEP_TAG_PREFIX}${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DEP_TAG_PREFIX}${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DEP_TAG_PREFIX}${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + + docker tag ${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${GITHUB_SHA} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${GITHUB_SHA} + + docker tag ${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DEP_TAG_PREFIX}${GITHUB_SHA} + docker 
push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_LIGHT}:${DEP_TAG_PREFIX}${GITHUB_SHA} + docker tag ${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DEP_TAG_PREFIX}${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DEP_TAG_PREFIX}${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DEP_TAG_PREFIX}${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DEP_TAG_PREFIX}${DOCKER_TAG_MAJOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + docker tag ${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DEP_TAG_PREFIX}${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DEP_TAG_PREFIX}${DOCKER_TAG_MINOR_VERSION_DEVELOPER_TOOLS} + + docker tag ${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${GITHUB_SHA} + docker push 
${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${GITHUB_SHA} + + docker tag ${DOCKER_IMAGE_DEVELOPER_KRM}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DEP_TAG_PREFIX}${GITHUB_SHA} + docker push ${REGISTRY_URL}/${DOCKER_IMAGE_DEVELOPER_KRM}:${DEP_TAG_PREFIX}${GITHUB_SHA} + # The following tasks are provided as examples and aren't invoked from this # repository directly. These three make targets are the only targets that # should be included in every CFT module's `Makefile` diff --git a/infra/build/developer-tools-krm/Dockerfile b/infra/build/developer-tools-krm/Dockerfile index 194071202d4..a963e991237 100644 --- a/infra/build/developer-tools-krm/Dockerfile +++ b/infra/build/developer-tools-krm/Dockerfile @@ -15,11 +15,56 @@ ARG BASE_IMAGE_VERSION FROM cft/developer-tools:$BASE_IMAGE_VERSION -RUN apk update && apk add --no-cache openrc docker-cli docker screen nodejs-current nodejs-npm +RUN apk update && apk add --no-cache openrc docker-cli docker screen nodejs-current npm yamllint util-linux-misc libc6-compat ansible-lint py3-jsonschema pipx -# Additional go tooling -RUN GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 +# Required to download and install golangci-lint +ARG GOLANGCI_VERSION +ENV GOLANGCI_VERSION ${GOLANGCI_VERSION} + +ADD ./build/install_golangci_lint.sh /build/ +RUN /build/install_golangci_lint.sh ${GOLANGCI_VERSION} + +# Required to download and install asmcli +ARG ASMCLI_VERSION +ENV ASMCLI_VERSION ${ASMCLI_VERSION} + +ADD ./build/install_asmcli.sh /build/ +RUN /build/install_asmcli.sh ${ASMCLI_VERSION} + +# Required to download and install gator +ARG GATOR_VERSION +ENV GATOR_VERSION ${GATOR_VERSION} + +ADD ./build/install_gator.sh /build/ +RUN /build/install_gator.sh ${GATOR_VERSION} + +# Required to download and install OPA +ARG OPA_VERSION +ENV OPA_VERSION ${OPA_VERSION} + +ADD ./build/install_opa.sh /build/ +RUN /build/install_opa.sh ${OPA_VERSION} + +# Required to 
download and install kind +ARG KIND_VERSION +ENV KIND_VERSION ${KIND_VERSION} + +ADD ./build/install_kind.sh /build/ +RUN /build/install_kind.sh ${KIND_VERSION} + +# Required to download and install crane +ARG GCRANE_VERSION +ENV GCRANE_VERSION ${GCRANE_VERSION} + +ADD ./build/install_gcrane.sh /build/ +RUN /build/install_gcrane.sh ${GCRANE_VERSION} + +RUN rm -rf /build # Add dind helper for prow ADD ./build/scripts/runner.sh /usr/local/bin/ ADD ./build/scripts/prow_entrypoint.sh /usr/local/bin/ + +# cookiecutter is used for terraform-google-module-template tests +# https://github.com/terraform-google-modules/terraform-google-module-template +RUN pipx install cookiecutter diff --git a/infra/build/developer-tools-krm/build/install_asmcli.sh b/infra/build/developer-tools-krm/build/install_asmcli.sh new file mode 100755 index 00000000000..4a366651b3d --- /dev/null +++ b/infra/build/developer-tools-krm/build/install_asmcli.sh @@ -0,0 +1,27 @@ +#! /bin/bash +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -e +set -u + +mkdir -p /build/install_asmcli +cd /build/install_asmcli + +ASMCLI_VERSION=$1 + +curl https://storage.googleapis.com/csm-artifacts/asm/asmcli_${ASMCLI_VERSION} > asmcli +install -o 0 -g 0 -m 0755 asmcli /usr/local/bin/asmcli + +rm -rf /build/install_asmcli diff --git a/infra/build/developer-tools-krm/build/install_gator.sh b/infra/build/developer-tools-krm/build/install_gator.sh new file mode 100755 index 00000000000..dc8b9fe0d69 --- /dev/null +++ b/infra/build/developer-tools-krm/build/install_gator.sh @@ -0,0 +1,28 @@ +#! /bin/bash +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -u + +mkdir -p /build/install_gator +cd /build/install_gator + +GATOR_VERSION=$1 + +wget -nv "https://github.com/open-policy-agent/gatekeeper/releases/download/v${GATOR_VERSION}/gator-v${GATOR_VERSION}-linux-amd64.tar.gz" +tar -xf gator-v${GATOR_VERSION}-linux-amd64.tar.gz +install -o 0 -g 0 -m 0755 gator /usr/local/bin/gator + +rm -rf /build/install_gator diff --git a/infra/build/developer-tools-krm/build/install_gcrane.sh b/infra/build/developer-tools-krm/build/install_gcrane.sh new file mode 100755 index 00000000000..13e56a8bd8a --- /dev/null +++ b/infra/build/developer-tools-krm/build/install_gcrane.sh @@ -0,0 +1,24 @@ +#! /bin/bash +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -u + +cd /build + +GCRANE_VERSION=$1 + +go install github.com/google/go-containerregistry/cmd/gcrane@v$GCRANE_VERSION +ln -s $(go env GOPATH)/bin/gcrane /usr/local/bin/ diff --git a/infra/build/developer-tools-krm/build/install_golangci_lint.sh b/infra/build/developer-tools-krm/build/install_golangci_lint.sh new file mode 100755 index 00000000000..8e6ada2d088 --- /dev/null +++ b/infra/build/developer-tools-krm/build/install_golangci_lint.sh @@ -0,0 +1,24 @@ +#! /bin/bash +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -u + +cd /build + +GOLANGCI_VERSION=$1 + +go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$GOLANGCI_VERSION +ln -s $(go env GOPATH)/bin/golangci-lint /usr/local/bin/ diff --git a/infra/build/developer-tools-krm/build/install_kind.sh b/infra/build/developer-tools-krm/build/install_kind.sh new file mode 100755 index 00000000000..e1dc9fcf80b --- /dev/null +++ b/infra/build/developer-tools-krm/build/install_kind.sh @@ -0,0 +1,27 @@ +#! 
/bin/bash +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -u + +mkdir -p /build/install_kind +cd /build/install_kind + +KIND_VERSION=$1 + +wget -nv "https://kind.sigs.k8s.io/dl/v${KIND_VERSION}/kind-linux-amd64" +install -o 0 -g 0 -m 0755 kind-linux-amd64 /usr/local/bin/kind + +rm -rf /build/install_kind diff --git a/infra/build/developer-tools-krm/build/install_opa.sh b/infra/build/developer-tools-krm/build/install_opa.sh new file mode 100755 index 00000000000..da34e29b75b --- /dev/null +++ b/infra/build/developer-tools-krm/build/install_opa.sh @@ -0,0 +1,27 @@ +#! /bin/bash +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -e +set -u + +mkdir -p /build/install_opa +cd /build/install_opa + +OPA_VERSION=$1 + +wget -nv "https://openpolicyagent.org/downloads/v${OPA_VERSION}/opa_linux_amd64_static" -O opa +install -o 0 -g 0 -m 0755 opa /usr/local/bin/opa + +rm -rf /build/install_opa diff --git a/infra/build/developer-tools-light/Dockerfile b/infra/build/developer-tools-light/Dockerfile index 8a616bc284e..94afb1d40c7 100644 --- a/infra/build/developer-tools-light/Dockerfile +++ b/infra/build/developer-tools-light/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2021 Google LLC +# Copyright 2021-2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,22 +13,15 @@ # limitations under the License. # Download and verify the integrity of the download first -FROM sethvargo/hashicorp-installer:0.2.0 AS installer -# Required to download and install Terraform -ARG TERRAFORM_VERSION -ENV TERRAFORM_VERSION ${TERRAFORM_VERSION} -RUN /install-hashicorp-tool "terraform" "$TERRAFORM_VERSION" - -FROM alpine:3.13 +ARG ALPINE_VERSION +FROM alpine:$ALPINE_VERSION RUN apk update && apk add --no-cache \ bash \ git \ wget \ python3 \ - jq - -# Install Terraform -COPY --from=installer /software/terraform /usr/local/bin/terraform + jq \ + go # Install cloud SDK ENV PATH /usr/local/google-cloud-sdk/bin:$PATH @@ -36,6 +29,13 @@ ARG CLOUD_SDK_VERSION ENV CLOUD_SDK_VERSION ${CLOUD_SDK_VERSION} ADD developer-tools/build/install_cloud_sdk.sh /build/ RUN /build/install_cloud_sdk.sh ${CLOUD_SDK_VERSION} + +# Required to download and install Terraform +ARG TERRAFORM_VERSION +ENV TERRAFORM_VERSION ${TERRAFORM_VERSION} +ADD developer-tools/build/install_terraform.sh /build/ +RUN /build/install_terraform.sh ${TERRAFORM_VERSION} + RUN rm -rf /build RUN terraform version && gcloud version diff --git a/infra/build/developer-tools/Dockerfile b/infra/build/developer-tools/Dockerfile index be78af2ed14..911bf861015 100644 --- 
a/infra/build/developer-tools/Dockerfile +++ b/infra/build/developer-tools/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2019 Google LLC +# Copyright 2019-2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,141 +12,115 @@ # See the License for the specific language governing permissions and # limitations under the License. # Download and verify the integrity of the download first -ARG RUBY_VERSION -ARG GOLANG_VERSION - -FROM sethvargo/hashicorp-installer:0.2.0 AS installer -# Required to download and install Terraform -ARG TERRAFORM_VERSION -ENV TERRAFORM_VERSION ${TERRAFORM_VERSION} -RUN /install-hashicorp-tool "terraform" "$TERRAFORM_VERSION" - -# Builds module-swapper -FROM golang:$GOLANG_VERSION-alpine AS module-swapper-builder -WORKDIR /go/src/github.com/GoogleCloudPlatform/infra/developer-tools/build/scripts/module-swapper -COPY ./build/scripts/module-swapper ./ -RUN go build -v -o /usr/local/bin/module-swapper - -FROM golang:$GOLANG_VERSION-alpine AS go - -FROM ruby:$RUBY_VERSION-alpine - -# Required to download and install terraform-docs -ARG TERRAFORM_DOCS_VERSION -ENV TERRAFORM_DOCS_VERSION ${TERRAFORM_DOCS_VERSION} - -# Required to download and install the Terraform gsuite provider -ARG GSUITE_PROVIDER_VERSION -ENV GSUITE_PROVIDER_VERSION ${GSUITE_PROVIDER_VERSION} - -# Required to download and install Google Cloud SDK -# Google Cloud SDK is located at /usr/local/google-cloud-sdk -ARG CLOUD_SDK_VERSION -ENV CLOUD_SDK_VERSION ${CLOUD_SDK_VERSION} +ARG ALPINE_VERSION +FROM alpine:$ALPINE_VERSION # Required to override base path ARG KITCHEN_TEST_BASE_PATH="test/integration" ENV KITCHEN_TEST_BASE_PATH ${KITCHEN_TEST_BASE_PATH} -# Required to download and install Bats -ARG BATS_VERSION -ENV BATS_VERSION ${BATS_VERSION} - -# Required to download and install Bats-support -ARG BATS_SUPPORT_VERSION -ENV BATS_SUPPORT_VERSION ${BATS_SUPPORT_VERSION} - -# Required to 
download and install Bats-assert -ARG BATS_ASSERT_VERSION -ENV BATS_ASSERT_VERSION ${BATS_ASSERT_VERSION} - -# Required to download and install Bats-mock -ARG BATS_MOCK_VERSION -ENV BATS_MOCK_VERSION ${BATS_MOCK_VERSION} - -ARG TERRAGRUNT_VERSION -ENV TERRAGRUNT_VERSION ${TERRAGRUNT_VERSION} - -# Required to download and install Kustomize -ARG KUSTOMIZE_VERSION -ENV KUSTOMIZE_VERSION ${KUSTOMIZE_VERSION} - -# Required to download and install Kpt -ARG KPT_VERSION -ENV KPT_VERSION ${KPT_VERSION} - # $WORKSPACE is intended for assets that persist across multiple build steps in a pipeline. # It's also where the project git repository is located. # https://cloud.google.com/cloud-build/docs/build-config ENV WORKSPACE="/workspace" ENV PATH /usr/local/google-cloud-sdk/bin:$PATH -# bash is preferred over /bin/sh -RUN apk add --no-cache bash parallel - -# All package-level dependencies are handled by install_dependencies.sh -ADD ./build/install_dependencies.sh /build/ -RUN /build/install_dependencies.sh - -ADD ./build/install_cloud_sdk.sh /build/ +# Install Distro Dependencies +# diffutils contains the full version of diff needed for the -exclude argument. 
+# That argument is needed for check_documentation in task_helper_functions.sh +# rsync is needed for check_documentation in task_helper_functions.sh +# install gcompat, because protoc needs a real glibc or compatible layer +RUN apk upgrade --no-cache && \ + apk add --no-cache \ + bash \ + parallel \ + coreutils \ + curl \ + findutils \ + git \ + grep \ + g++ \ + jq \ + make \ + python3-dev \ + musl-dev \ + openssh \ + openssl \ + python3 \ + py-pip \ + ca-certificates \ + diffutils \ + rsync \ + gcompat \ + gettext \ + libintl \ + py3-crcmod \ + py3-flake8 \ + py3-jinja2 \ + py3-google-auth \ + py3-google-api-python-client \ + py3-pygithub \ + py3-requests \ + ruby-dev \ + go \ + kubectl + +# Add all build scripts +ADD ./build/*.sh /build/ + +# Add cloud sdk first, gsutil required by some installers +ARG CLOUD_SDK_VERSION RUN /build/install_cloud_sdk.sh ${CLOUD_SDK_VERSION} -ADD ./build/install_gsuite_terraform_provider.sh /build/ -RUN /build/install_gsuite_terraform_provider.sh ${GSUITE_PROVIDER_VERSION} - - -# Required to download and install Terraform Validator -ARG TERRAFORM_VALIDATOR_VERSION -ENV TERRAFORM_VALIDATOR_VERSION ${TERRAFORM_VALIDATOR_VERSION} -ADD ./build/install_terraform_validator.sh /build/ -RUN /build/install_terraform_validator.sh ${TERRAFORM_VALIDATOR_VERSION} - -#ADD ./build/install_terraform.sh /build/ -#RUN /build/install_terraform.sh ${TERRAFORM_VERSION} -COPY --from=installer /software/terraform /usr/local/bin/terraform - -# Install Golang -COPY --from=go /usr/local/go/ /usr/local/go/ -RUN ln -s /usr/local/go/bin/go /usr/local/bin/go -RUN ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt - -# Install module swapper -COPY --from=module-swapper-builder /usr/local/bin/module-swapper /usr/local/bin/module-swapper - -ADD ./build/install_shellcheck.sh /build/ -RUN /build/install_shellcheck.sh - -ADD ./build/install_hadolint.sh /build/ -RUN /build/install_hadolint.sh - -ADD ./build/install_terraform_docs.sh /build/ -RUN 
/build/install_terraform_docs.sh ${TERRAFORM_DOCS_VERSION} - -ADD ./build/install_bats.sh /build/ -RUN /build/install_bats.sh ${BATS_VERSION} ${BATS_SUPPORT_VERSION} ${BATS_ASSERT_VERSION} ${BATS_MOCK_VERSION} - -ADD ./build/install_kubectl.sh /build/ -RUN /build/install_kubectl.sh +# Optimized to minimize layers +ARG GSUITE_PROVIDER_VERSION +ARG TERRAFORM_DOCS_VERSION +ARG BATS_VERSION +ARG BATS_SUPPORT_VERSION +ARG BATS_ASSERT_VERSION +ARG BATS_MOCK_VERSION +ARG TERRAGRUNT_VERSION +ARG KUSTOMIZE_VERSION +ARG PROTOC_VERSION +ARG PROTOC_GEN_GO_VERSION +ARG PROTOC_GEN_GO_GRPC_VERSION +ARG PROTOC_GEN_GO_INJECT_TAG +ARG TFLINT_VERSION +ARG TINKEY_VERSION +RUN /build/install_shellcheck.sh && \ + /build/install_hadolint.sh && \ + /build/install_addlicense.sh && \ + /build/install_gsuite_terraform_provider.sh ${GSUITE_PROVIDER_VERSION} && \ + /build/install_terraform_docs.sh ${TERRAFORM_DOCS_VERSION} && \ + /build/install_bats.sh ${BATS_VERSION} ${BATS_SUPPORT_VERSION} ${BATS_ASSERT_VERSION} ${BATS_MOCK_VERSION} && \ + /build/install_terragrunt.sh ${TERRAGRUNT_VERSION} && \ + /build/install_kustomize.sh ${KUSTOMIZE_VERSION} && \ + /build/install_protoc.sh ${PROTOC_VERSION} ${PROTOC_GEN_GO_VERSION} ${PROTOC_GEN_GO_GRPC_VERSION} ${PROTOC_GEN_GO_INJECT_TAG} && \ + /build/install_tflint.sh ${TFLINT_VERSION} && \ + /build/install_tinkey.sh ${TINKEY_VERSION} + +# Optimized to independently cache layers for commonly updated tools: +# https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/.github/workflows/update-tooling.yml +ARG TERRAFORM_VERSION +RUN /build/install_terraform.sh ${TERRAFORM_VERSION} -ADD ./build/install_terragrunt.sh /build/ -RUN ./build/install_terragrunt.sh ${TERRAGRUNT_VERSION} +ARG KPT_VERSION +RUN /build/install_kpt.sh ${KPT_VERSION} -ADD ./build/install_kustomize.sh /build/ -RUN ./build/install_kustomize.sh ${KUSTOMIZE_VERSION} +ARG CFT_CLI_VERSION +RUN /build/install_cft_cli.sh ${CFT_CLI_VERSION} -ADD ./build/install_kpt.sh /build/ 
-RUN ./build/install_kpt.sh ${KPT_VERSION} +ARG MODULE_SWAPPER_VERSION +RUN /build/install_module-swapper.sh ${MODULE_SWAPPER_VERSION} -ADD ./build/install_addlicense.sh /build/ -RUN /build/install_addlicense.sh +ARG TFLINT_BP_PLUGIN_VERSION +RUN /build/install_tflint_plugin.sh ${TFLINT_BP_PLUGIN_VERSION} WORKDIR /opt/kitchen ADD ./build/data/Gemfile . ADD ./build/data/Gemfile.lock . -ADD ./build/data/requirements.txt . -RUN bundle install && pip3 install -r requirements.txt +RUN gem install bundler:2.5.10 && bundle install --retry=10 -COPY ./build/install_verify_boilerplate.sh /build/ COPY ./build/verify_boilerplate/ /build/verify_boilerplate/ RUN /build/install_verify_boilerplate.sh @@ -173,27 +147,32 @@ ADD ./build/scripts/gh_lint_comment.py /usr/local/bin/ ADD ./build/home/bash_history /root/.bash_history ADD ./build/home/bashrc /root/.bashrc +# TFLint configs +ADD ./build/home/.tflint.example.hcl /root/tflint/.tflint.example.hcl +ADD ./build/home/.tflint.module.hcl /root/tflint/.tflint.module.hcl + # Set TF cache dir ENV TF_PLUGIN_CACHE_DIR /workspace/test/integration/tmp/.terraform RUN mkdir -p ${TF_PLUGIN_CACHE_DIR} -# Set TF Validator temporary plan dir -ENV TF_VALIDATOR_TMP_PLAN_DIR /workspace/test/integration/tmp/tfvt/ -RUN mkdir -p ${TF_VALIDATOR_TMP_PLAN_DIR} +# Add GO_PATH TO PATH +ENV GO_PATH="/root/go" +ENV PATH=$GO_PATH/bin:$PATH WORKDIR $WORKSPACE RUN terraform --version && \ terraform-docs --version && \ - terraform-validator version && \ gcloud --version && \ ruby --version && \ bundle --version && \ kubectl version --client=true && \ terragrunt -version && \ kustomize version && \ - addlicense -help + addlicense -help && \ + cft version && \ + protoc --version && \ + go version # Cleanup intermediate build artifacts RUN rm -rf /build -RUN go version CMD ["/bin/bash"] diff --git a/infra/build/developer-tools/README.md b/infra/build/developer-tools/README.md index 54fb74c949b..5f0e3db3c9d 100644 --- a/infra/build/developer-tools/README.md +++ 
b/infra/build/developer-tools/README.md @@ -15,7 +15,7 @@ Review the `Makefile` to identify other variable inputs to the build and release ## Environment Variables The following environment variables are inputs to the running container. -Enviornment variables also implement feature flags to enable or disable +Environment variables also implement feature flags to enable or disable behavior. These variables are considered a public API and interface into the running container instance. diff --git a/infra/build/developer-tools/build/data/Gemfile b/infra/build/developer-tools/build/data/Gemfile index 22250f46b7d..13093786583 100644 --- a/infra/build/developer-tools/build/data/Gemfile +++ b/infra/build/developer-tools/build/data/Gemfile @@ -1,4 +1,4 @@ -# Copyright 2018 Google LLC +# Copyright 2018-2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-source 'https://rubygems.org/' do - gem "kitchen-terraform", "~> 5.8" - gem "kubeclient", "~> 4.0" - gem "rest-client", "~> 2.0" - gem 'nokogiri', '~> 1.11' -end +source 'https://rubygems.org/' +gem "kitchen-terraform", "~> 7.0" +gem "kubeclient", "~> 4.11" +gem "rest-client", "~> 2.1" +gem 'nokogiri', '~> 1.16' +ruby '~> 3.3.1' diff --git a/infra/build/developer-tools/build/data/Gemfile.lock b/infra/build/developer-tools/build/data/Gemfile.lock index 44ca18a5bcc..79395a191cb 100644 --- a/infra/build/developer-tools/build/data/Gemfile.lock +++ b/infra/build/developer-tools/build/data/Gemfile.lock @@ -1,364 +1,476 @@ GEM remote: https://rubygems.org/ specs: - activesupport (5.2.4.4) + activesupport (7.0.7.2) concurrent-ruby (~> 1.0, >= 1.0.2) - i18n (>= 0.7, < 2) - minitest (~> 5.1) - tzinfo (~> 1.1) - addressable (2.8.0) - public_suffix (>= 2.0.2, < 5.0) - aws-eventstream (1.1.0) - aws-partitions (1.381.0) - aws-sdk-apigateway (1.55.0) - aws-sdk-core (~> 3, >= 3.109.0) + i18n (>= 1.6, < 2) + minitest (>= 5.1) + tzinfo (~> 2.0) + addressable (2.8.5) + public_suffix (>= 2.0.2, < 6.0) + ast (2.4.2) + aws-eventstream (1.2.0) + aws-partitions (1.816.0) + aws-sdk-account (1.17.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-apigatewayv2 (1.29.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-alexaforbusiness (1.64.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-athena (1.33.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-amplify (1.32.0) + aws-sdk-core (~> 3, >= 3.120.0) aws-sigv4 (~> 1.1) - aws-sdk-autoscaling (1.22.0) - aws-sdk-core (~> 3, >= 3.52.1) + aws-sdk-apigateway (1.87.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-budgets (1.35.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-apigatewayv2 (1.50.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-cloudformation (1.44.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-applicationautoscaling (1.51.0) + aws-sdk-core (~> 3, >= 3.112.0) aws-sigv4 (~> 1.1) - 
aws-sdk-cloudfront (1.43.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-athena (1.74.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-cloudhsm (1.27.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-autoscaling (1.92.0) + aws-sdk-core (~> 3, >= 3.176.0) aws-sigv4 (~> 1.1) - aws-sdk-cloudhsmv2 (1.30.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-batch (1.73.0) + aws-sdk-core (~> 3, >= 3.176.0) aws-sigv4 (~> 1.1) - aws-sdk-cloudtrail (1.29.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-budgets (1.58.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-cloudwatch (1.45.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-cloudformation (1.88.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-cloudwatchlogs (1.38.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-cloudfront (1.82.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-codecommit (1.40.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-cloudhsm (1.47.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-codedeploy (1.37.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-cloudhsmv2 (1.49.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-codepipeline (1.37.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-cloudtrail (1.68.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-configservice (1.53.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-cloudwatch (1.80.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-core (3.109.1) + aws-sdk-cloudwatchevents (1.62.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-cloudwatchlogs (1.69.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-codecommit (1.59.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-codedeploy (1.57.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-codepipeline (1.61.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-cognitoidentity (1.45.0) + aws-sdk-core (~> 3, >= 3.176.0) + 
aws-sigv4 (~> 1.1) + aws-sdk-cognitoidentityprovider (1.76.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-configservice (1.97.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-core (3.181.0) aws-eventstream (~> 1, >= 1.0.2) - aws-partitions (~> 1, >= 1.239.0) + aws-partitions (~> 1, >= 1.651.0) + aws-sigv4 (~> 1.5) + jmespath (~> 1, >= 1.6.1) + aws-sdk-costandusagereportservice (1.49.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-databasemigrationservice (1.80.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-dynamodb (1.93.1) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-ec2 (1.402.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-ecr (1.63.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-ecrpublic (1.21.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-ecs (1.128.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-efs (1.65.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-eks (1.89.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-elasticache (1.91.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - jmespath (~> 1.0) - aws-sdk-costandusagereportservice (1.28.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-elasticbeanstalk (1.59.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-dynamodb (1.55.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-elasticloadbalancing (1.47.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-ec2 (1.200.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-elasticloadbalancingv2 (1.90.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-ecr (1.39.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-elasticsearchservice (1.76.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-ecs (1.70.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-emr (1.53.0) + aws-sdk-core (~> 3, >= 3.121.2) aws-sigv4 (~> 
1.1) - aws-sdk-efs (1.36.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-eventbridge (1.46.0) + aws-sdk-core (~> 3, >= 3.176.0) aws-sigv4 (~> 1.1) - aws-sdk-eks (1.45.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-firehose (1.56.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-elasticache (1.44.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-glue (1.145.0) + aws-sdk-core (~> 3, >= 3.176.0) aws-sigv4 (~> 1.1) - aws-sdk-elasticbeanstalk (1.38.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-guardduty (1.77.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-elasticloadbalancing (1.29.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-iam (1.86.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-elasticloadbalancingv2 (1.53.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-kafka (1.62.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-elasticsearchservice (1.43.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-kinesis (1.50.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-firehose (1.35.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-kms (1.71.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-guardduty (1.42.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-lambda (1.104.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-iam (1.46.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-mq (1.40.0) + aws-sdk-core (~> 3, >= 3.120.0) aws-sigv4 (~> 1.1) - aws-sdk-kafka (1.29.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-networkfirewall (1.34.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-kinesis (1.30.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-networkmanager (1.35.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-kms (1.39.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-organizations (1.77.0) + aws-sdk-core (~> 3, >= 3.176.0) aws-sigv4 (~> 1.1) - aws-sdk-lambda (1.51.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-ram (1.26.0) + aws-sdk-core (~> 3, >= 
3.112.0) aws-sigv4 (~> 1.1) - aws-sdk-organizations (1.17.0) - aws-sdk-core (~> 3, >= 3.39.0) - aws-sigv4 (~> 1.0) - aws-sdk-rds (1.103.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-rds (1.192.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-redshift (1.50.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-redshift (1.97.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-route53 (1.44.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-route53 (1.78.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-route53domains (1.28.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-route53domains (1.51.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-route53resolver (1.21.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-route53resolver (1.47.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-s3 (1.83.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-s3 (1.134.0) + aws-sdk-core (~> 3, >= 3.181.0) aws-sdk-kms (~> 1) + aws-sigv4 (~> 1.6) + aws-sdk-s3control (1.43.0) + aws-sdk-core (~> 3, >= 3.122.0) aws-sigv4 (~> 1.1) - aws-sdk-securityhub (1.35.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-secretsmanager (1.46.0) + aws-sdk-core (~> 3, >= 3.112.0) aws-sigv4 (~> 1.1) - aws-sdk-ses (1.36.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-securityhub (1.91.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-sms (1.27.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-servicecatalog (1.60.0) + aws-sdk-core (~> 3, >= 3.112.0) aws-sigv4 (~> 1.1) - aws-sdk-sns (1.33.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-ses (1.41.0) + aws-sdk-core (~> 3, >= 3.120.0) aws-sigv4 (~> 1.1) - aws-sdk-sqs (1.34.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-shield (1.56.0) + aws-sdk-core (~> 3, >= 3.177.0) aws-sigv4 (~> 1.1) - aws-sdk-ssm (1.93.0) - aws-sdk-core (~> 3, >= 3.109.0) + aws-sdk-signer (1.32.0) + aws-sdk-core (~> 3, >= 3.120.0) aws-sigv4 (~> 1.1) - aws-sigv4 (1.2.2) + aws-sdk-simpledb (1.29.0) + aws-sdk-core (~> 3, >= 
3.120.0) + aws-sigv2 (~> 1.0) + aws-sdk-sms (1.48.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-sns (1.65.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-sqs (1.62.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-ssm (1.156.0) + aws-sdk-core (~> 3, >= 3.177.0) + aws-sigv4 (~> 1.1) + aws-sdk-states (1.39.0) + aws-sdk-core (~> 3, >= 3.112.0) + aws-sigv4 (~> 1.1) + aws-sdk-synthetics (1.19.0) + aws-sdk-core (~> 3, >= 3.121.2) + aws-sigv4 (~> 1.1) + aws-sdk-transfer (1.73.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-waf (1.43.0) + aws-sdk-core (~> 3, >= 3.122.0) + aws-sigv4 (~> 1.1) + aws-sigv2 (1.1.0) + aws-sigv4 (1.6.0) aws-eventstream (~> 1, >= 1.0.2) azure_graph_rbac (0.17.2) ms_rest_azure (~> 0.12.0) - azure_mgmt_key_vault (0.17.6) + azure_mgmt_key_vault (0.17.7) ms_rest_azure (~> 0.12.0) - azure_mgmt_resources (0.18.0) + azure_mgmt_resources (0.18.2) ms_rest_azure (~> 0.12.0) - azure_mgmt_security (0.18.2) + azure_mgmt_security (0.19.0) ms_rest_azure (~> 0.12.0) - azure_mgmt_storage (0.22.0) + azure_mgmt_storage (0.23.0) ms_rest_azure (~> 0.12.0) - bcrypt_pbkdf (1.0.1) + bcrypt_pbkdf (1.1.0) + bson (4.15.0) builder (3.2.4) - chef-config (16.5.77) + chef-config (18.2.7) addressable - chef-utils (= 16.5.77) + chef-utils (= 18.2.7) fuzzyurl mixlib-config (>= 2.2.12, < 4.0) mixlib-shellout (>= 2.0, < 4.0) tomlrb (~> 1.2) - chef-telemetry (1.0.14) + chef-telemetry (1.1.1) chef-config concurrent-ruby (~> 1.0) - ffi-yajl (~> 2.2) - chef-utils (16.5.77) + chef-utils (18.2.7) + concurrent-ruby coderay (1.1.3) - concurrent-ruby (1.1.7) + concurrent-ruby (1.2.2) + cookstyle (7.32.2) + rubocop (= 1.25.1) declarative (0.0.20) - declarative-option (0.1.0) - delegate (0.1.0) - diff-lcs (1.4.4) - docker-api (2.0.0) + delegate (0.3.0) + diff-lcs (1.5.0) + docker-api (2.2.0) excon (>= 0.47.0) multi_json - domain_name (0.5.20180417) + domain_name (0.5.20190701) unf (>= 0.0.5, < 1.0.0) - 
dry-configurable (0.11.6) + dry-configurable (1.1.0) + dry-core (~> 1.0, < 2) + zeitwerk (~> 2.6) + dry-core (1.0.1) concurrent-ruby (~> 1.0) - dry-core (~> 0.4, >= 0.4.7) - dry-equalizer (~> 0.2) - dry-container (0.7.2) + zeitwerk (~> 2.6) + dry-inflector (1.0.0) + dry-initializer (3.1.1) + dry-logic (1.5.0) concurrent-ruby (~> 1.0) - dry-configurable (~> 0.1, >= 0.1.3) - dry-core (0.4.9) + dry-core (~> 1.0, < 2) + zeitwerk (~> 2.6) + dry-schema (1.13.3) concurrent-ruby (~> 1.0) - dry-equalizer (0.3.0) - dry-inflector (0.2.0) - dry-logic (0.6.1) + dry-configurable (~> 1.0, >= 1.0.1) + dry-core (~> 1.0, < 2) + dry-initializer (~> 3.0) + dry-logic (>= 1.4, < 2) + dry-types (>= 1.7, < 2) + zeitwerk (~> 2.6) + dry-types (1.7.1) concurrent-ruby (~> 1.0) - dry-core (~> 0.2) - dry-equalizer (~> 0.2) - dry-types (0.14.1) + dry-core (~> 1.0) + dry-inflector (~> 1.0) + dry-logic (~> 1.4) + zeitwerk (~> 2.6) + dry-validation (1.10.0) concurrent-ruby (~> 1.0) - dry-container (~> 0.3) - dry-core (~> 0.4, >= 0.4.4) - dry-equalizer (~> 0.2) - dry-inflector (~> 0.1, >= 0.1.2) - dry-logic (~> 0.5, >= 0.5) - dry-validation (0.13.3) - concurrent-ruby (~> 1.0) - dry-configurable (~> 0.1, >= 0.1.3) - dry-core (~> 0.2, >= 0.2.1) - dry-equalizer (~> 0.2) - dry-logic (~> 0.5, >= 0.5.0) - dry-types (~> 0.14.0) - ecma-re-validator (0.2.1) - regexp_parser (~> 1.2) - ed25519 (1.2.4) - erubi (1.9.0) - excon (0.76.0) - faraday (1.0.1) - multipart-post (>= 1.2, < 3) + dry-core (~> 1.0, < 2) + dry-initializer (~> 3.0) + dry-schema (>= 1.12, < 2) + zeitwerk (~> 2.6) + ed25519 (1.3.0) + erubi (1.12.0) + excon (0.102.0) + faraday (1.10.3) + faraday-em_http (~> 1.0) + faraday-em_synchrony (~> 1.0) + faraday-excon (~> 1.1) + faraday-httpclient (~> 1.0) + faraday-multipart (~> 1.0) + faraday-net_http (~> 1.0) + faraday-net_http_persistent (~> 1.0) + faraday-patron (~> 1.0) + faraday-rack (~> 1.0) + faraday-retry (~> 1.0) + ruby2_keywords (>= 0.0.4) faraday-cookie_jar (0.0.7) faraday (>= 0.8.0) 
http-cookie (~> 1.0.0) + faraday-em_http (1.0.0) + faraday-em_synchrony (1.0.0) + faraday-excon (1.1.0) + faraday-follow_redirects (0.3.0) + faraday (>= 1, < 3) + faraday-httpclient (1.0.1) + faraday-multipart (1.0.4) + multipart-post (~> 2) + faraday-net_http (1.0.1) + faraday-net_http_persistent (1.2.0) + faraday-patron (1.0.0) + faraday-rack (1.0.0) + faraday-retry (1.0.3) faraday_middleware (1.0.0) faraday (~> 1.0) - ffi (1.11.1) + ffi (1.15.5) ffi-compiler (1.0.1) ffi (>= 1.0.0) rake - ffi-yajl (2.3.4) - libyajl2 (~> 1.2) fuzzyurl (0.9.0) - google-api-client (0.44.0) + google-api-client (0.52.0) addressable (~> 2.5, >= 2.5.1) googleauth (~> 0.9) httpclient (>= 2.8.1, < 3.0) mini_mime (~> 1.0) representable (~> 3.0) retriable (>= 2.0, < 4.0) + rexml signet (~> 0.12) - googleauth (0.13.0) + googleauth (0.14.0) faraday (>= 0.17.3, < 2.0) jwt (>= 1.4, < 3.0) memoist (~> 0.16) multi_json (~> 1.11) os (>= 0.9, < 2.0) signet (~> 0.14) - gssapi (1.3.0) + gssapi (1.3.1) ffi (>= 1.0.1) - gyoku (1.3.1) + gyoku (1.4.0) builder (>= 2.1.2) - hana (1.3.6) - hashie (3.6.0) - http (4.2.0) - addressable (~> 2.3) + rexml (~> 3.0) + hashie (4.1.0) + highline (2.1.0) + http (5.1.1) + addressable (~> 2.8) http-cookie (~> 1.0) - http-form_data (~> 2.0) - http-parser (~> 1.2.0) + http-form_data (~> 2.2) + llhttp-ffi (~> 0.4.0) http-accept (1.7.0) - http-cookie (1.0.3) + http-cookie (1.0.5) domain_name (~> 0.5) - http-form_data (2.1.1) - http-parser (1.2.1) - ffi-compiler (>= 1.0, < 2.0) + http-form_data (2.3.0) httpclient (2.8.3) - i18n (1.8.5) + i18n (1.14.1) concurrent-ruby (~> 1.0) inifile (3.0.0) - inspec (4.23.11) + inspec (5.21.29) + cookstyle faraday_middleware (>= 0.12.2, < 1.1) - inspec-core (= 4.23.11) - train (~> 3.0) - train-aws (~> 0.1) + inspec-core (= 5.21.29) + mongo (= 2.13.2) + progress_bar (~> 1.3.3) + rake + train (~> 3.10) + train-aws (~> 0.2) train-habitat (~> 0.1) train-winrm (~> 0.2) - inspec-core (4.23.11) + inspec-core (5.21.29) addressable (~> 2.4) - 
chef-telemetry (~> 1.0) - faraday (>= 0.9.0, < 1.1) - hashie (~> 3.4) - json_schemer (>= 0.2.1, < 0.2.12) + chef-telemetry (~> 1.0, >= 1.0.8) + faraday (>= 1, < 3) + faraday-follow_redirects (~> 0.3) + hashie (>= 3.4, < 5.0) license-acceptance (>= 0.2.13, < 3.0) method_source (>= 0.8, < 2.0) mixlib-log (~> 3.0) multipart-post (~> 2.0) parallel (~> 1.9) - parslet (~> 1.5) + parslet (>= 1.5, < 2.0) pry (~> 0.13) - rspec (~> 3.9) + rspec (>= 3.9, <= 3.11) rspec-its (~> 1.2) - rubyzip (~> 1.2, >= 1.2.2) + rubyzip (>= 1.2.2, < 3.0) semverse (~> 3.0) sslshake (~> 1.2) thor (>= 0.20, < 2.0) - tomlrb (~> 1.2.0) - train-core (~> 3.0) + tomlrb (>= 1.2, < 2.1) + train-core (~> 3.10) tty-prompt (~> 0.17) tty-table (~> 0.10) - jmespath (1.4.0) - json (2.3.1) - json_schemer (0.2.11) - ecma-re-validator (~> 0.2) - hana (~> 1.3) - regexp_parser (~> 1.5) - uri_template (~> 0.7) - jwt (2.2.2) - kitchen-terraform (5.8.0) - delegate (~> 0.1.0) - dry-validation (~> 0.13) - inspec (>= 3, < 5, != 4.24.32, != 4.24.28, != 4.24.26) + jmespath (1.6.2) + json (2.6.3) + jsonpath (1.1.3) + multi_json + jwt (2.7.1) + kitchen-terraform (7.0.2) + delegate (~> 0.3.0) + dry-validation (~> 1.6) + inspec (~> 5.21, >= 5.21.29) json (~> 2.3) - mixlib-shellout (~> 3.0) - test-kitchen (~> 2.1) - tty-which (~> 0.4.0) - kubeclient (4.5.0) - http (>= 3.0, < 5.0) - recursive-open-struct (~> 1.0, >= 1.0.4) + test-kitchen (>= 2.1, < 4.0) + tty-which (~> 0.5.0) + kubeclient (4.11.0) + http (>= 3.0, < 6.0) + jsonpath (~> 1.0) + recursive-open-struct (~> 1.1, >= 1.1.1) rest-client (~> 2.0) - libyajl2 (1.2.0) - license-acceptance (2.1.2) + license-acceptance (2.1.13) pastel (~> 0.7) - tomlrb (~> 1.2) + tomlrb (>= 1.2, < 3.0) tty-box (~> 0.6) tty-prompt (~> 0.20) little-plugger (1.1.4) - logging (2.3.0) + llhttp-ffi (0.4.0) + ffi-compiler (~> 1.0) + rake (~> 13.0) + logging (2.3.1) little-plugger (~> 1.1) multi_json (~> 1.14) memoist (0.16.2) method_source (1.0.0) - mime-types (3.2.2) + mime-types (3.4.1) 
mime-types-data (~> 3.2015) - mime-types-data (3.2019.0331) - mini_mime (1.0.2) - mini_portile2 (2.5.1) - minitest (5.14.2) - mixlib-config (3.0.9) + mime-types-data (3.2022.0105) + mini_mime (1.1.5) + mini_portile2 (2.8.6) + minitest (5.19.0) + mixlib-config (3.0.27) tomlrb - mixlib-install (3.12.3) + mixlib-install (3.12.27) mixlib-shellout mixlib-versioning thor mixlib-log (3.0.9) - mixlib-shellout (3.1.6) + mixlib-shellout (3.2.7) chef-utils mixlib-versioning (1.2.12) + mongo (2.13.2) + bson (>= 4.8.2, < 5.0.0) ms_rest (0.7.6) concurrent-ruby (~> 1.0) faraday (>= 0.9, < 2.0.0) @@ -369,33 +481,41 @@ GEM faraday-cookie_jar (~> 0.0.6) ms_rest (~> 0.7.6) multi_json (1.15.0) - multipart-post (2.1.1) - net-scp (3.0.0) - net-ssh (>= 2.6.5, < 7.0.0) - net-ssh (6.1.0) + multipart-post (2.3.0) + net-scp (4.0.0) + net-ssh (>= 2.6.5, < 8.0.0) + net-ssh (7.2.0) net-ssh-gateway (2.0.0) net-ssh (>= 4.0.0) netrc (0.11.0) - nokogiri (1.11.4) - mini_portile2 (~> 2.5.0) + nokogiri (1.16.5) + mini_portile2 (~> 2.8.2) racc (~> 1.4) nori (2.6.0) - os (1.1.1) - parallel (1.19.2) + options (2.3.2) + os (1.1.4) + parallel (1.23.0) + parser (3.2.2.3) + ast (~> 2.4.1) + racc parslet (1.8.2) pastel (0.8.0) tty-color (~> 0.5) - pry (0.13.1) + progress_bar (1.3.3) + highline (>= 1.6, < 3) + options (~> 2.3.0) + pry (0.14.2) coderay (~> 1.1) method_source (~> 1.0) - public_suffix (4.0.6) - racc (1.5.2) - rake (13.0.0) - recursive-open-struct (1.1.0) - regexp_parser (1.8.2) - representable (3.0.4) + public_suffix (5.0.3) + racc (1.7.3) + rainbow (3.1.1) + rake (13.0.6) + recursive-open-struct (1.1.3) + regexp_parser (2.8.1) + representable (3.2.0) declarative (< 0.1.0) - declarative-option (< 0.2.0) + trailblazer-option (>= 0.1.1, < 0.2.0) uber (< 0.2.0) rest-client (2.1.0) http-accept (>= 1.7.0, < 2.0) @@ -403,71 +523,91 @@ GEM mime-types (>= 1.16, < 4.0) netrc (~> 0.8) retriable (3.1.2) - rspec (3.9.0) - rspec-core (~> 3.9.0) - rspec-expectations (~> 3.9.0) - rspec-mocks (~> 3.9.0) - 
rspec-core (3.9.3) - rspec-support (~> 3.9.3) - rspec-expectations (3.9.2) + rexml (3.3.9) + rspec (3.11.0) + rspec-core (~> 3.11.0) + rspec-expectations (~> 3.11.0) + rspec-mocks (~> 3.11.0) + rspec-core (3.11.0) + rspec-support (~> 3.11.0) + rspec-expectations (3.11.1) diff-lcs (>= 1.2.0, < 2.0) - rspec-support (~> 3.9.0) + rspec-support (~> 3.11.0) rspec-its (1.3.0) rspec-core (>= 3.0.0) rspec-expectations (>= 3.0.0) - rspec-mocks (3.9.1) + rspec-mocks (3.11.2) diff-lcs (>= 1.2.0, < 2.0) - rspec-support (~> 3.9.0) - rspec-support (3.9.3) - rubyntlm (0.6.2) - rubyzip (1.3.0) - semverse (3.0.0) - signet (0.14.0) - addressable (~> 2.3) - faraday (>= 0.17.3, < 2.0) + rspec-support (~> 3.11.0) + rspec-support (3.11.1) + rubocop (1.25.1) + parallel (~> 1.10) + parser (>= 3.1.0.0) + rainbow (>= 2.2.2, < 4.0) + regexp_parser (>= 1.8, < 3.0) + rexml + rubocop-ast (>= 1.15.1, < 2.0) + ruby-progressbar (~> 1.7) + unicode-display_width (>= 1.4.0, < 3.0) + rubocop-ast (1.29.0) + parser (>= 3.2.1.0) + ruby-progressbar (1.13.0) + ruby2_keywords (0.0.5) + rubyntlm (0.6.3) + rubyzip (2.3.2) + semverse (3.0.2) + signet (0.17.0) + addressable (~> 2.8) + faraday (>= 0.17.5, < 3.a) jwt (>= 1.5, < 3.0) multi_json (~> 1.10) sslshake (1.3.1) - strings (0.2.0) + strings (0.2.1) strings-ansi (~> 0.2) - unicode-display_width (~> 1.5) + unicode-display_width (>= 1.5, < 3.0) unicode_utils (~> 1.4) strings-ansi (0.2.0) - test-kitchen (2.7.2) + test-kitchen (3.5.0) bcrypt_pbkdf (~> 1.0) + chef-utils (>= 16.4.35) ed25519 (~> 1.2) license-acceptance (>= 1.0.11, < 3.0) mixlib-install (~> 3.6) mixlib-shellout (>= 1.2, < 4.0) - net-scp (>= 1.1, < 4.0) - net-ssh (>= 2.9, < 7.0) + net-scp (>= 1.1, < 5.0) + net-ssh (>= 2.9, < 8.0) net-ssh-gateway (>= 1.2, < 3.0) thor (>= 0.19, < 2.0) winrm (~> 2.0) winrm-elevated (~> 1.0) winrm-fs (~> 1.1) - thor (1.0.1) - thread_safe (0.3.6) + thor (1.2.2) timeliness (0.3.10) - tomlrb (1.2.9) - train (3.3.24) - activesupport (>= 5.2.4.3, < 6.0.0) + tomlrb (1.3.0) + 
trailblazer-option (0.1.2) + train (3.10.8) + activesupport (>= 6.0.3.1) azure_graph_rbac (~> 0.16) azure_mgmt_key_vault (~> 0.17) azure_mgmt_resources (~> 0.15) azure_mgmt_security (~> 0.18) azure_mgmt_storage (~> 0.18) docker-api (>= 1.26, < 3.0) - google-api-client (>= 0.23.9, < 0.44.1) - googleauth (>= 0.6.6, < 0.13.1) + google-api-client (>= 0.23.9, <= 0.52.0) + googleauth (>= 0.6.6, <= 0.14.0) inifile (~> 3.0) - train-core (= 3.3.24) + train-core (= 3.10.8) train-winrm (~> 0.2) - train-aws (0.1.18) + train-aws (0.2.36) + aws-sdk-account (~> 1.14) + aws-sdk-alexaforbusiness (~> 1.0) + aws-sdk-amplify (~> 1.32.0) aws-sdk-apigateway (~> 1.0) aws-sdk-apigatewayv2 (~> 1.0) + aws-sdk-applicationautoscaling (>= 1.46, < 1.52) aws-sdk-athena (~> 1.0) - aws-sdk-autoscaling (~> 1.22.0) + aws-sdk-autoscaling (>= 1.22, < 1.93) + aws-sdk-batch (>= 1.36, < 1.74) aws-sdk-budgets (~> 1.0) aws-sdk-cloudformation (~> 1.0) aws-sdk-cloudfront (~> 1.0) @@ -475,16 +615,21 @@ GEM aws-sdk-cloudhsmv2 (~> 1.0) aws-sdk-cloudtrail (~> 1.8) aws-sdk-cloudwatch (~> 1.13) + aws-sdk-cloudwatchevents (>= 1.36, < 1.63) aws-sdk-cloudwatchlogs (~> 1.13) aws-sdk-codecommit (~> 1.0) aws-sdk-codedeploy (~> 1.0) aws-sdk-codepipeline (~> 1.0) + aws-sdk-cognitoidentity (>= 1.26, < 1.46) + aws-sdk-cognitoidentityprovider (>= 1.46, < 1.77) aws-sdk-configservice (~> 1.21) aws-sdk-core (~> 3.0) aws-sdk-costandusagereportservice (~> 1.6) + aws-sdk-databasemigrationservice (>= 1.42, < 1.81) aws-sdk-dynamodb (~> 1.31) aws-sdk-ec2 (~> 1.70) aws-sdk-ecr (~> 1.18) + aws-sdk-ecrpublic (~> 1.3) aws-sdk-ecs (~> 1.30) aws-sdk-efs (~> 1.0) aws-sdk-eks (~> 1.9) @@ -493,48 +638,65 @@ GEM aws-sdk-elasticloadbalancing (~> 1.8) aws-sdk-elasticloadbalancingv2 (~> 1.0) aws-sdk-elasticsearchservice (~> 1.0) + aws-sdk-emr (~> 1.53.0) + aws-sdk-eventbridge (>= 1.24, < 1.47) aws-sdk-firehose (~> 1.0) + aws-sdk-glue (>= 1.71, < 1.146) aws-sdk-guardduty (~> 1.31) aws-sdk-iam (~> 1.13) aws-sdk-kafka (~> 1.0) aws-sdk-kinesis (~> 
1.0) aws-sdk-kms (~> 1.13) aws-sdk-lambda (~> 1.0) - aws-sdk-organizations (~> 1.17.0) + aws-sdk-mq (~> 1.40.0) + aws-sdk-networkfirewall (>= 1.6.0) + aws-sdk-networkmanager (>= 1.13.0) + aws-sdk-organizations (>= 1.17, < 1.78) + aws-sdk-ram (>= 1.21, < 1.27) aws-sdk-rds (~> 1.43) aws-sdk-redshift (~> 1.0) aws-sdk-route53 (~> 1.0) aws-sdk-route53domains (~> 1.0) aws-sdk-route53resolver (~> 1.0) aws-sdk-s3 (~> 1.30) + aws-sdk-s3control (~> 1.43.0) + aws-sdk-secretsmanager (>= 1.42, < 1.47) aws-sdk-securityhub (~> 1.0) - aws-sdk-ses (~> 1.0) + aws-sdk-servicecatalog (>= 1.48, < 1.61) + aws-sdk-ses (~> 1.41.0) + aws-sdk-shield (~> 1.30) + aws-sdk-signer (~> 1.32.0) + aws-sdk-simpledb (~> 1.29.0) aws-sdk-sms (~> 1.0) aws-sdk-sns (~> 1.9) aws-sdk-sqs (~> 1.10) aws-sdk-ssm (~> 1.0) - train-core (3.3.24) + aws-sdk-states (>= 1.35, < 1.40) + aws-sdk-synthetics (~> 1.19.0) + aws-sdk-transfer (>= 1.26, < 1.74) + aws-sdk-waf (~> 1.43.0) + train-core (3.10.8) addressable (~> 2.5) ffi (!= 1.13.0) json (>= 1.8, < 3.0) mixlib-shellout (>= 2.0, < 4.0) - net-scp (>= 1.2, < 4.0) - net-ssh (>= 2.9, < 7.0) - train-habitat (0.2.13) - train-winrm (0.2.11) - winrm (~> 2.0) + net-scp (>= 1.2, < 5.0) + net-ssh (>= 2.9, < 8.0) + train-habitat (0.2.22) + train-winrm (0.2.13) + winrm (>= 2.3.6, < 3.0) winrm-elevated (~> 1.2.2) winrm-fs (~> 1.0) - tty-box (0.6.0) + tty-box (0.7.0) pastel (~> 0.8) strings (~> 0.2.0) tty-cursor (~> 0.7) - tty-color (0.5.2) + tty-color (0.6.0) tty-cursor (0.7.1) - tty-prompt (0.22.0) + tty-prompt (0.23.1) pastel (~> 0.8) tty-reader (~> 0.8) - tty-reader (0.8.0) + tty-reader (0.9.0) tty-cursor (~> 0.7) tty-screen (~> 0.8) wisper (~> 2.0) @@ -543,17 +705,16 @@ GEM pastel (~> 0.8) strings (~> 0.2.0) tty-screen (~> 0.8) - tty-which (0.4.2) - tzinfo (1.2.7) - thread_safe (~> 0.1) + tty-which (0.5.0) + tzinfo (2.0.6) + concurrent-ruby (~> 1.0) uber (0.1.0) unf (0.1.4) unf_ext - unf_ext (0.0.7.2) - unicode-display_width (1.7.0) + unf_ext (0.0.8.2) + 
unicode-display_width (2.4.2) unicode_utils (1.4.0) - uri_template (0.7.0) - winrm (2.3.4) + winrm (2.3.6) builder (>= 2.1.2) erubi (~> 1.8) gssapi (~> 1.2) @@ -561,26 +722,30 @@ GEM httpclient (~> 2.2, >= 2.2.0.2) logging (>= 1.6.1, < 3.0) nori (~> 2.0) - rubyntlm (~> 0.6.0, >= 0.6.1) - winrm-elevated (1.2.2) + rubyntlm (~> 0.6.0, >= 0.6.3) + winrm-elevated (1.2.3) erubi (~> 1.8) winrm (~> 2.0) winrm-fs (~> 1.0) - winrm-fs (1.3.3) + winrm-fs (1.3.5) erubi (~> 1.8) logging (>= 1.6.1, < 3.0) - rubyzip (~> 1.1) + rubyzip (~> 2.0) winrm (~> 2.0) wisper (2.0.1) + zeitwerk (2.6.11) PLATFORMS ruby DEPENDENCIES - kitchen-terraform (~> 5.8)! - kubeclient (~> 4.0)! - nokogiri (~> 1.11)! - rest-client (~> 2.0)! + kitchen-terraform (~> 7.0) + kubeclient (~> 4.11) + nokogiri (~> 1.16) + rest-client (~> 2.1) + +RUBY VERSION + ruby 3.3.1p55 BUNDLED WITH - 1.17.3 + 2.5.10 diff --git a/infra/build/developer-tools/build/data/requirements.txt b/infra/build/developer-tools/build/data/requirements.txt deleted file mode 100644 index c77a7a16615..00000000000 --- a/infra/build/developer-tools/build/data/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -google-api-python-client~=1.7 -google-auth~=1.6 -Jinja2==2.11.3 -PyGithub==1.51 diff --git a/infra/build/developer-tools/build/home/.tflint.example.hcl b/infra/build/developer-tools/build/home/.tflint.example.hcl new file mode 100644 index 00000000000..7cd335a5f0d --- /dev/null +++ b/infra/build/developer-tools/build/home/.tflint.example.hcl @@ -0,0 +1,32 @@ +// disable all rules by default +config { + disabled_by_default = true +} + +plugin "terraform" { + enabled = true +} + +rule "terraform_deprecated_index" { + enabled = true +} + +rule "terraform_deprecated_interpolation" { + enabled = true +} + +rule "terraform_empty_list_equality" { + enabled = true +} + +rule "terraform_module_pinned_source" { + enabled = true +} + +rule "terraform_module_version" { + enabled = true +} + +rule "terraform_unused_declarations" { + enabled = true +} 
diff --git a/infra/build/developer-tools/build/home/.tflint.module.hcl b/infra/build/developer-tools/build/home/.tflint.module.hcl new file mode 100644 index 00000000000..dd82ca95767 --- /dev/null +++ b/infra/build/developer-tools/build/home/.tflint.module.hcl @@ -0,0 +1,69 @@ +// disable all rules by default +config { + disabled_by_default = true +} + +plugin "terraform" { + enabled = true +} + +rule "terraform_deprecated_index" { + enabled = true +} + +rule "terraform_deprecated_interpolation" { + enabled = true +} + +rule "terraform_empty_list_equality" { + enabled = true +} + +rule "terraform_module_pinned_source" { + enabled = true +} + +rule "terraform_module_version" { + enabled = true +} + +rule "terraform_unused_declarations" { + enabled = true +} + +// module specific +rule "terraform_documented_outputs" { + enabled = true +} + +rule "terraform_documented_variables" { + enabled = true +} + +rule "terraform_module_pinned_source" { + enabled = true +} + +rule "terraform_module_version" { + enabled = true +} + +rule "terraform_required_providers" { + enabled = true +} + +rule "terraform_required_version" { + enabled = true +} + +rule "terraform_typed_variables" { + enabled = true +} + +plugin "blueprint" { + enabled = true +} + +rule "terraform_required_version_range" { + enabled = true +} diff --git a/infra/build/developer-tools/build/install_addlicense.sh b/infra/build/developer-tools/build/install_addlicense.sh index e3c6172742b..22390bd08ae 100755 --- a/infra/build/developer-tools/build/install_addlicense.sh +++ b/infra/build/developer-tools/build/install_addlicense.sh @@ -18,5 +18,5 @@ set -u cd /build -go get -u github.com/google/addlicense@v1.0.0 +go install github.com/google/addlicense@v1.1.0 ln -s $(go env GOPATH)/bin/addlicense /usr/local/bin/ diff --git a/infra/build/developer-tools/build/install_bats.sh b/infra/build/developer-tools/build/install_bats.sh index deb76152c42..e17d28ed022 100755 --- a/infra/build/developer-tools/build/install_bats.sh 
+++ b/infra/build/developer-tools/build/install_bats.sh @@ -16,32 +16,29 @@ set -e set -u +mkdir -p /build/install_bats +cd /build/install_bats + BATS_VERSION=$1 BATS_SUPPORT_VERSION=$2 BATS_ASSERT_VERSION=$3 BATS_MOCK_VERSION=$4 -# bats required envsubst missing in Alpine by default -apk add gettext libintl - -cd /build -wget "https://github.com/sstephenson/bats/archive/v${BATS_VERSION}.zip" -unzip "v${BATS_VERSION}.zip" +wget -nv "https://github.com/sstephenson/bats/archive/v${BATS_VERSION}.zip" +unzip -q "v${BATS_VERSION}.zip" cd "bats-${BATS_VERSION}" ./install.sh /usr/local -rm -rf "v${BATS_VERSION}" "bats-${BATS_VERSION}" -wget "https://github.com/ztombol/bats-support/archive/v${BATS_SUPPORT_VERSION}.zip" -unzip "v${BATS_SUPPORT_VERSION}.zip" +wget -nv "https://github.com/ztombol/bats-support/archive/v${BATS_SUPPORT_VERSION}.zip" +unzip -q "v${BATS_SUPPORT_VERSION}.zip" cp -r "bats-support-${BATS_SUPPORT_VERSION}" /usr/local/bats-support -rm -rf "v${BATS_SUPPORT_VERSION}.zip" "bats-support-${BATS_SUPPORT_VERSION}" -wget "https://github.com/jasonkarns/bats-assert-1/archive/v${BATS_ASSERT_VERSION}.zip" -unzip "v${BATS_ASSERT_VERSION}.zip" +wget -nv "https://github.com/jasonkarns/bats-assert-1/archive/v${BATS_ASSERT_VERSION}.zip" +unzip -q "v${BATS_ASSERT_VERSION}.zip" cp -r "bats-assert-${BATS_ASSERT_VERSION}" /usr/local/bats-assert -rm -rf "v${BATS_ASSERT_VERSION}.zip" "bats-assert-${BATS_ASSERT_VERSION}" -wget "https://github.com/jasonkarns/bats-mock/archive/v${BATS_MOCK_VERSION}.zip" -unzip "v${BATS_MOCK_VERSION}.zip" +wget -nv "https://github.com/jasonkarns/bats-mock/archive/v${BATS_MOCK_VERSION}.zip" +unzip -q "v${BATS_MOCK_VERSION}.zip" cp -r "bats-mock-${BATS_MOCK_VERSION}" /usr/local/bats-mock -rm -rf "v${BATS_MOCK_VERSION}.zip" "bats-mock-${BATS_MOCK_VERSION}" + +rm -rf /build/install_bats diff --git a/infra/build/developer-tools/build/install_cft_cli.sh b/infra/build/developer-tools/build/install_cft_cli.sh new file mode 100755 index 
00000000000..9ad82f6c39a --- /dev/null +++ b/infra/build/developer-tools/build/install_cft_cli.sh @@ -0,0 +1,30 @@ +#! /bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -u + +mkdir -p /build/install_cft_cli +cd /build/install_cft_cli + +CFT_CLI_VERSION=$1 + +if ! wget -nv "https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/releases/download/cli%2Fv${CFT_CLI_VERSION}/cft-linux-amd64"; then + wget -nv "https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/releases/download/v${CFT_CLI_VERSION}/cft-linux-amd64" +fi + +install -o 0 -g 0 -m 0755 cft-linux-amd64 /usr/local/bin/cft + +rm -rf /build/install_cft_cli diff --git a/infra/build/developer-tools/build/install_cloud_sdk.sh b/infra/build/developer-tools/build/install_cloud_sdk.sh index d8382ee0f7b..7afff2464a0 100755 --- a/infra/build/developer-tools/build/install_cloud_sdk.sh +++ b/infra/build/developer-tools/build/install_cloud_sdk.sh @@ -20,7 +20,7 @@ CLOUD_SDK_VERSION=$1 cd /build -wget "https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${CLOUD_SDK_VERSION}-linux-x86_64.tar.gz" +wget -nv "https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${CLOUD_SDK_VERSION}-linux-x86_64.tar.gz" tar -C /usr/local -xzf "google-cloud-sdk-${CLOUD_SDK_VERSION}-linux-x86_64.tar.gz" rm "google-cloud-sdk-${CLOUD_SDK_VERSION}-linux-x86_64.tar.gz" @@ -31,8 +31,12 @@ ln -s /lib /lib64 gcloud config set 
core/disable_usage_reporting true gcloud config set component_manager/disable_update_check true gcloud config set survey/disable_prompts true -gcloud components install beta --quiet -gcloud components install alpha --quiet +gcloud config set enable_feature_flags false +# https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke +# https://cloud.google.com/docs/terraform/policy-validation/quickstart +gcloud components install alpha beta terraform-tools gke-gcloud-auth-plugin --quiet + +rm -rf /usr/local/google-cloud-sdk/.install/.backup gcloud --version gsutil version -l diff --git a/infra/build/developer-tools/build/install_dependencies.sh b/infra/build/developer-tools/build/install_dependencies.sh deleted file mode 100755 index e04b387b6c2..00000000000 --- a/infra/build/developer-tools/build/install_dependencies.sh +++ /dev/null @@ -1,73 +0,0 @@ -#! /bin/bash -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -e -set -u - -# coreutils provides xargs and other utilities necessary for lint checks -apk add --no-cache coreutils - -# curl is used by unit tests and is nice to have -apk add --no-cache curl - -# findutils provides find which is used by lint checks -apk add --no-cache findutils - -# git is used to clone repositories -apk add --no-cache git - -# grep is used by lint checks -apk add --no-cache grep - -# g++ is probably used to install dependencies like psych, but unsure -apk add --no-cache g++ - -# jq is useful for parsing JSON data -apk add --no-cache jq - -# make is used for executing make tasks -apk add --no-cache make - -# musl-dev provides the standard C headers -apk add --no-cache python3-dev musl-dev - -# openssh is used for ssh-ing into bastion hosts -apk add --no-cache openssh - -# unclear why perl is needed, but is good to have -apk add --no-cache perl - -# python 2 is needed for compatibility and linting -apk add --no-cache python - -# python 3 is needed for python linting -apk add --no-cache python3 - -# py-pip is needed for installing pip packages -apk add --no-cache py-pip - -# ca-certificates is needed to verify the authenticity of artifacts downloaded -# from the internet -apk add --no-cache ca-certificates - -# diffutils contains the full version of diff needed for the -exclude argument. 
-# That argument is needed for check_documentation in task_helper_functions.sh -apk add --no-cache diffutils - -# rsync is needed for check_documentation in task_helper_functions.sh -apk add --no-cache rsync - -# flake8 and jinja2 are used for lint checks -pip install flake8 jinja2 diff --git a/infra/build/developer-tools/build/install_gsuite_terraform_provider.sh b/infra/build/developer-tools/build/install_gsuite_terraform_provider.sh index c841cd51755..18e66f15cb5 100755 --- a/infra/build/developer-tools/build/install_gsuite_terraform_provider.sh +++ b/infra/build/developer-tools/build/install_gsuite_terraform_provider.sh @@ -16,12 +16,14 @@ set -e set -u -GSUITE_PROVIDER_VERSION=$1 +mkdir -p /build/install_gsuite_terraform_provider +cd /build/install_gsuite_terraform_provider -cd /build +GSUITE_PROVIDER_VERSION=$1 -wget "https://github.com/DeviaVir/terraform-provider-gsuite/releases/download/v${GSUITE_PROVIDER_VERSION}/terraform-provider-gsuite_${GSUITE_PROVIDER_VERSION}_linux_amd64.tgz" -tar xzf "terraform-provider-gsuite_${GSUITE_PROVIDER_VERSION}_linux_amd64.tgz" -rm "terraform-provider-gsuite_${GSUITE_PROVIDER_VERSION}_linux_amd64.tgz" +wget -nv "https://github.com/DeviaVir/terraform-provider-gsuite/releases/download/v${GSUITE_PROVIDER_VERSION}/terraform-provider-gsuite_${GSUITE_PROVIDER_VERSION}_linux_amd64.tgz" +tar -xzf "terraform-provider-gsuite_${GSUITE_PROVIDER_VERSION}_linux_amd64.tgz" install -o 0 -g 0 -m 0755 -d ~/.terraform.d/plugins/ install -o 0 -g 0 -m 0755 "terraform-provider-gsuite_v${GSUITE_PROVIDER_VERSION}" ~/.terraform.d/plugins/ + +rm -rf /build/install_gsuite_terraform_provider diff --git a/infra/build/developer-tools/build/install_hadolint.sh b/infra/build/developer-tools/build/install_hadolint.sh index f32c21cd87f..4d570face90 100755 --- a/infra/build/developer-tools/build/install_hadolint.sh +++ b/infra/build/developer-tools/build/install_hadolint.sh @@ -16,7 +16,10 @@ set -e set -u -cd /build +mkdir -p /build/install_hadolint +cd 
/build/install_hadolint -wget https://github.com/hadolint/hadolint/releases/download/v1.15.0/hadolint-Linux-x86_64 +wget -nv "https://github.com/hadolint/hadolint/releases/download/v1.15.0/hadolint-Linux-x86_64" install -o 0 -g 0 -m 0755 hadolint-Linux-x86_64 /usr/local/bin/hadolint + +rm -rf /build/install_hadolint diff --git a/infra/build/developer-tools/build/install_kpt.sh b/infra/build/developer-tools/build/install_kpt.sh index 6a3a2747e1a..8852514e45b 100755 --- a/infra/build/developer-tools/build/install_kpt.sh +++ b/infra/build/developer-tools/build/install_kpt.sh @@ -16,11 +16,13 @@ set -e set -u -KPT_VERSION=$1 +mkdir -p /build/install_kpt +cd /build/install_kpt -cd /build +KPT_VERSION=$1 -wget "https://github.com/GoogleContainerTools/kpt/releases/download/v${KPT_VERSION}/kpt_linux_amd64-${KPT_VERSION}.tar.gz" -tar xzf "kpt_linux_amd64-${KPT_VERSION}.tar.gz" -rm "kpt_linux_amd64-${KPT_VERSION}.tar.gz" +wget -nv "https://github.com/GoogleContainerTools/kpt/releases/download/v${KPT_VERSION}/kpt_linux_amd64-${KPT_VERSION}.tar.gz" +tar -xzf "kpt_linux_amd64-${KPT_VERSION}.tar.gz" install -o 0 -g 0 -m 0755 kpt /usr/local/bin/ + +rm -rf /build/install_kpt diff --git a/infra/build/developer-tools/build/install_kubectl.sh b/infra/build/developer-tools/build/install_kubectl.sh deleted file mode 100755 index cb1af3bae4a..00000000000 --- a/infra/build/developer-tools/build/install_kubectl.sh +++ /dev/null @@ -1,22 +0,0 @@ -#! /bin/bash -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -set -e -set -u - -cd /build - -wget https://storage.googleapis.com/kubernetes-release/release/v1.15.12/bin/linux/amd64/kubectl -install -o 0 -g 0 -m 0755 kubectl /usr/local/bin/kubectl diff --git a/infra/build/developer-tools/build/install_kustomize.sh b/infra/build/developer-tools/build/install_kustomize.sh index 42cb4af3f14..917e95c74e7 100755 --- a/infra/build/developer-tools/build/install_kustomize.sh +++ b/infra/build/developer-tools/build/install_kustomize.sh @@ -16,10 +16,13 @@ set -e set -u -cd /build +mkdir -p /build/install_kustomize +cd /build/install_kustomize KUSTOMIZE_VERSION=$1 -wget -q https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v${KUSTOMIZE_VERSION}/kustomize_v${KUSTOMIZE_VERSION}_linux_amd64.tar.gz -tar xzf ./kustomize_v${KUSTOMIZE_VERSION}_linux_amd64.tar.gz +wget -nv https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v${KUSTOMIZE_VERSION}/kustomize_v${KUSTOMIZE_VERSION}_linux_amd64.tar.gz +tar -xzf kustomize_v${KUSTOMIZE_VERSION}_linux_amd64.tar.gz install -o 0 -g 0 -m 0755 kustomize /usr/local/bin/kustomize + +rm -rf /build/install_kustomize diff --git a/infra/build/developer-tools/build/install_module-swapper.sh b/infra/build/developer-tools/build/install_module-swapper.sh new file mode 100755 index 00000000000..b6ba0b85b57 --- /dev/null +++ b/infra/build/developer-tools/build/install_module-swapper.sh @@ -0,0 +1,24 @@ +#! /bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -u + +MODULE_SWAPPER_VERSION=$1 + +cd /build + +go install github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/module-swapper@v${MODULE_SWAPPER_VERSION} +ln -s $(go env GOPATH)/bin/module-swapper /usr/local/bin/ diff --git a/infra/build/developer-tools/build/install_protoc.sh b/infra/build/developer-tools/build/install_protoc.sh new file mode 100755 index 00000000000..1dff967feb1 --- /dev/null +++ b/infra/build/developer-tools/build/install_protoc.sh @@ -0,0 +1,37 @@ +#! /bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -e +set -u + +PROTOC_VERSION=$1 +PROTOC_GEN_GO_VERSION=$2 +PROTOC_GEN_GO_GRPC_VERSION=$3 +PROTOC_GEN_GO_INJECT_TAG=$4 + +mkdir -p /build/install_protoc +cd /build/install_protoc + +curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip" +unzip -q "protoc-${PROTOC_VERSION}-linux-x86_64.zip" -d $HOME/.local +chmod 755 $HOME/.local/bin/protoc +cp $HOME/.local/bin/protoc /usr/local/bin/ +cp -R $HOME/.local/include/ /usr/local/include/ + +go install google.golang.org/protobuf/cmd/protoc-gen-go@v${PROTOC_GEN_GO_VERSION} +go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v${PROTOC_GEN_GO_GRPC_VERSION} +go install github.com/favadi/protoc-go-inject-tag@v${PROTOC_GEN_GO_INJECT_TAG} + +rm -rf /build/install_protoc diff --git a/infra/build/developer-tools/build/install_shellcheck.sh b/infra/build/developer-tools/build/install_shellcheck.sh index 48b7db20a84..f3ecae8e31d 100755 --- a/infra/build/developer-tools/build/install_shellcheck.sh +++ b/infra/build/developer-tools/build/install_shellcheck.sh @@ -16,9 +16,11 @@ set -e set -u -cd /build +mkdir -p /build/install_shellcheck +cd /build/install_shellcheck -wget https://github.com/koalaman/shellcheck/releases/download/v0.6.0/shellcheck-v0.6.0.linux.x86_64.tar.xz +wget -nv "https://github.com/koalaman/shellcheck/releases/download/v0.6.0/shellcheck-v0.6.0.linux.x86_64.tar.xz" tar -xf shellcheck-v0.6.0.linux.x86_64.tar.xz install -o 0 -g 0 -m 0755 shellcheck-v0.6.0/shellcheck /usr/local/bin/shellcheck -rm -rf shellcheck-v0.6.0 shellcheck-v0.6.0.linux.x86_64.tar.xz + +rm -rf /build/install_shellcheck diff --git a/infra/build/developer-tools/build/install_terraform.sh b/infra/build/developer-tools/build/install_terraform.sh new file mode 100755 index 00000000000..ebacc2e89bb --- /dev/null +++ b/infra/build/developer-tools/build/install_terraform.sh @@ -0,0 +1,28 @@ +#! 
/bin/bash +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -u + +mkdir -p /build/install_terraform +cd /build/install_terraform + +TERRAFORM_VERSION=$1 + +wget -nv "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" +unzip -q terraform_${TERRAFORM_VERSION}_linux_amd64.zip +install -o 0 -g 0 -m 0755 terraform /usr/local/bin/terraform + +rm -rf /build/install_terraform diff --git a/infra/build/developer-tools/build/install_terraform_docs.sh b/infra/build/developer-tools/build/install_terraform_docs.sh index a36ebf6d6cd..c4b84c9edfe 100755 --- a/infra/build/developer-tools/build/install_terraform_docs.sh +++ b/infra/build/developer-tools/build/install_terraform_docs.sh @@ -1,5 +1,5 @@ #! /bin/bash -# Copyright 2019 Google LLC +# Copyright 2019-2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,9 +16,13 @@ set -e set -u -cd /build +mkdir -p /build/install_terraform_docs +cd /build/install_terraform_docs TERRAFORM_DOCS_VERSION=$1 -wget "https://github.com/segmentio/terraform-docs/releases/download/v${TERRAFORM_DOCS_VERSION}/terraform-docs-v${TERRAFORM_DOCS_VERSION}-linux-amd64" -install -o 0 -g 0 -m 0755 "terraform-docs-v${TERRAFORM_DOCS_VERSION}-linux-amd64" /usr/local/bin/terraform-docs +wget -nv "https://github.com/segmentio/terraform-docs/releases/download/v${TERRAFORM_DOCS_VERSION}/terraform-docs-v${TERRAFORM_DOCS_VERSION}-linux-amd64.tar.gz" +tar -xzf terraform-docs-v${TERRAFORM_DOCS_VERSION}-linux-amd64.tar.gz +install -o 0 -g 0 -m 0755 "terraform-docs" /usr/local/bin/terraform-docs + +rm -rf /build/install_terraform_docs diff --git a/infra/build/developer-tools/build/install_terraform_validator.sh b/infra/build/developer-tools/build/install_terraform_validator.sh deleted file mode 100755 index fac07620234..00000000000 --- a/infra/build/developer-tools/build/install_terraform_validator.sh +++ /dev/null @@ -1,28 +0,0 @@ -#! /bin/bash -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e -set -u - -cd /build - -TERRAFORM_VALIDATOR_VERSION=$1 - -gsutil cp gs://terraform-validator/releases/v${TERRAFORM_VALIDATOR_VERSION}/terraform-validator_linux_amd64-${TERRAFORM_VALIDATOR_VERSION}.tar.gz . 
-tar -xzvf terraform-validator_linux_amd64-${TERRAFORM_VALIDATOR_VERSION}.tar.gz - -install -o 0 -g 0 -m 0755 terraform-validator /usr/bin/ -rm terraform-validator_linux_amd64-${TERRAFORM_VALIDATOR_VERSION}.tar.gz -rm THIRD_PARTY_NOTICES.zip diff --git a/infra/build/developer-tools/build/install_terragrunt.sh b/infra/build/developer-tools/build/install_terragrunt.sh index 54d3520eb00..435ca0315e8 100755 --- a/infra/build/developer-tools/build/install_terragrunt.sh +++ b/infra/build/developer-tools/build/install_terragrunt.sh @@ -16,9 +16,12 @@ set -e set -u -cd /build +mkdir -p /build/install_terragrunt +cd /build/install_terragrunt TERRAGRUNT_VERSION=$1 -wget -q https://github.com/gruntwork-io/terragrunt/releases/download/v${TERRAGRUNT_VERSION}/terragrunt_linux_amd64 +wget -nv "https://github.com/gruntwork-io/terragrunt/releases/download/v${TERRAGRUNT_VERSION}/terragrunt_linux_amd64" install -o 0 -g 0 -m 0755 terragrunt_linux_amd64 /usr/local/bin/terragrunt + +rm -rf /build/install_terragrunt diff --git a/infra/build/developer-tools/build/install_tflint.sh b/infra/build/developer-tools/build/install_tflint.sh new file mode 100755 index 00000000000..7562310a988 --- /dev/null +++ b/infra/build/developer-tools/build/install_tflint.sh @@ -0,0 +1,28 @@ +#! /bin/bash +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -e +set -u + +mkdir -p /build/install_tflint +cd /build/install_tflint + +TF_LINT_VERSION=$1 + +wget -nv "https://github.com/terraform-linters/tflint/releases/download/v${TF_LINT_VERSION}/tflint_linux_amd64.zip" +unzip -q tflint_linux_amd64.zip +install -o 0 -g 0 -m 0755 tflint /usr/local/bin/tflint + +rm -rf /build/install_tflint diff --git a/infra/build/developer-tools/build/install_tflint_plugin.sh b/infra/build/developer-tools/build/install_tflint_plugin.sh new file mode 100755 index 00000000000..9b29ca6c8aa --- /dev/null +++ b/infra/build/developer-tools/build/install_tflint_plugin.sh @@ -0,0 +1,27 @@ +#! /bin/bash +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -u + +mkdir -p /build/install_tflint_plugin +cd /build/install_tflint_plugin + +TFLINT_BP_PLUGIN_VERSION=$1 +wget -nv "https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/releases/download/tflint-ruleset-blueprint%2Fv${TFLINT_BP_PLUGIN_VERSION}/tflint-ruleset-blueprint_linux_amd64.zip" +unzip -q tflint-ruleset-blueprint_linux_amd64.zip +mkdir -p ~/.tflint.d/plugins +install -o 0 -g 0 -m 0755 tflint-ruleset-blueprint ~/.tflint.d/plugins +rm -rf /build/install_tflint_plugin diff --git a/infra/build/developer-tools/build/install_tinkey.sh b/infra/build/developer-tools/build/install_tinkey.sh new file mode 100755 index 00000000000..47fd5bb3c2a --- /dev/null +++ b/infra/build/developer-tools/build/install_tinkey.sh @@ -0,0 +1,30 @@ +#! 
/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -u + +mkdir -p /build/install_tinkey +cd /build/install_tinkey + +TINKEY_VERSION=$1 + +gsutil cp "gs://tinkey/tinkey-${TINKEY_VERSION}.tar.gz" . +tar -xzf "tinkey-${TINKEY_VERSION}.tar.gz" + +install -o 0 -g 0 -m 0755 tinkey_deploy.jar /usr/bin/ +install -o 0 -g 0 -m 0755 tinkey /usr/bin/ + +rm -rf /build/install_tinkey diff --git a/infra/build/developer-tools/build/scripts/export_tf_outputs.py b/infra/build/developer-tools/build/scripts/export_tf_outputs.py index 42932f0f714..d4e2770f87b 100755 --- a/infra/build/developer-tools/build/scripts/export_tf_outputs.py +++ b/infra/build/developer-tools/build/scripts/export_tf_outputs.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright 2019 Google LLC # diff --git a/infra/build/developer-tools/build/scripts/gh_lint_comment.py b/infra/build/developer-tools/build/scripts/gh_lint_comment.py index f743414c6b9..70cec228df9 100644 --- a/infra/build/developer-tools/build/scripts/gh_lint_comment.py +++ b/infra/build/developer-tools/build/scripts/gh_lint_comment.py @@ -27,14 +27,17 @@ def create_update_comment(token, org, repo_name, pr_number, comment_body): pr_comment for pr_comment in pr_comments if pr_comment.user.id == current_bot_user ] - if not existing_comments: - # add a comment - comment = pr.create_issue_comment(comment_body) - logging.info(f'Added new comment: {comment}') - else: - # edit existing 
comment - existing_comments[0].edit(comment_body) - logging.info(f'Edited existing comment: {existing_comments[0]}') + # delete comments created previously by bot + for existing_comment in existing_comments: + existing_comment.delete() + logging.info(f'Deleted existing comment: {existing_comment}') + + # prefix comment body with PR author handle + comment_body = f"@{pr.user.login}\n{comment_body}" + + # create new comment + comment = pr.create_issue_comment(comment_body) + logging.info(f'Added new comment: {comment}') def parse_args(): parser = argparse.ArgumentParser(description='Add/edit comments to PRs') diff --git a/infra/build/developer-tools/build/scripts/module-swapper/Readme.md b/infra/build/developer-tools/build/scripts/module-swapper/Readme.md deleted file mode 100644 index d578731bf4f..00000000000 --- a/infra/build/developer-tools/build/scripts/module-swapper/Readme.md +++ /dev/null @@ -1,19 +0,0 @@ -# Module Swapper - -Module Swapper is a utility used for swapping TF registry references with local modules. It will ignore registry references to all other modules except for the one in current directory. - -``` -Usage of module-swapper: - -examples-path string - Path to examples that should be swapped. Defaults to cwd/examples (default "examples") - -registry-prefix string - Module registry prefix (default "terraform-google-modules") - -registry-suffix string - Module registry suffix (default "google") - -restore - Restores disabled modules - -submods-path string - Path to a submodules if any that maybe referenced. Defaults to working dir/modules (default "modules") - -workdir string - Absolute path to root module where examples should be swapped. 
Defaults to working directory -``` \ No newline at end of file diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/swap.go b/infra/build/developer-tools/build/scripts/module-swapper/cmd/swap.go deleted file mode 100644 index fe90034fc11..00000000000 --- a/infra/build/developer-tools/build/scripts/module-swapper/cmd/swap.go +++ /dev/null @@ -1,242 +0,0 @@ -package cmd - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" - - "github.com/go-git/go-git/v5" - "github.com/pmezard/go-difflib/difflib" -) - -type LocalTerraformModule struct { - Name string - Dir string - ModuleFQN string -} - -var ( - terraformExtension = "*.tf" - restoreMarker = "[restore-marker]" - linebreak = "\n" - localModules = []LocalTerraformModule{} -) - -// getRemoteURL gets the URL of a given remote from git repo at dir -func getRemoteURL(dir, remoteName string) (string, error) { - r, err := git.PlainOpen(dir) - if err != nil { - return "", err - } - rm, err := r.Remote(remoteName) - if err != nil { - return "", err - } - return rm.Config().URLs[0], nil -} - -// trimAnySuffixes trims first matching suffix from slice of suffixes -func trimAnySuffixes(s string, suffixes []string) string { - for _, suffix := range suffixes { - if strings.HasSuffix(s, suffix) { - s = s[:len(s)-len(suffix)] - return s - } - } - return s -} - -// getModuleNameRegistry returns module name and registry by parsing git remote -func getModuleNameRegistry(dir string) (string, string, error) { - remote, err := getRemoteURL(dir, "origin") - if err != nil { - return "", "", err - } - - // GH remote will be of form https://github.com/ModuleRegistry/ModuleName - if !strings.Contains(remote, "https://github.com/") { - return "", "", fmt.Errorf("Expected GitHub remote of form https://github.com/ModuleRegistry/ModuleRepo. 
Got: %s", remote) - } - - // remotes maybe suffixed with a trailing / or .git - remote = trimAnySuffixes(remote, []string{"/", ".git"}) - namePrefix := strings.ReplaceAll(remote, "https://github.com/", "") - if !strings.Contains(namePrefix, "/") { - return "", "", fmt.Errorf("Expected GitHub org/owner of form ModuleRegistry/ModuleRepo. Got: %s", namePrefix) - } - moduleRegistry := namePrefix[:strings.LastIndex(namePrefix, "/")] - repoName := namePrefix[strings.LastIndex(namePrefix, "/")+1:] - - // module repos are prefixed with terraform-google- - if !strings.HasPrefix(repoName, "terraform-google-") { - return "", "", fmt.Errorf("Expected to find repo name prefixed with terraform-google-. Got: %s", repoName) - } - moduleName := strings.ReplaceAll(repoName, "terraform-google-", "") - log.Printf("Module name set from remote to %s", moduleName) - return moduleName, moduleRegistry, nil -} - -// findSubModules generates slice of LocalTerraformModule for submodules -func findSubModules(path, rootModuleFQN string) []LocalTerraformModule { - var subModules = make([]LocalTerraformModule, 0) - // if no modules dir, return empty slice - if _, err := os.Stat(path); err != nil { - log.Print("No submodules found") - return subModules - } - files, err := ioutil.ReadDir(path) - if err != nil { - log.Fatalf("Error finding submodules: %v", err) - } - absPath, err := filepath.Abs(path) - if err != nil { - log.Fatalf("Error finding submodule absolute path: %v", err) - } - for _, f := range files { - if f.IsDir() { - subModules = append(subModules, LocalTerraformModule{f.Name(), filepath.Join(absPath, f.Name()), fmt.Sprintf("%s//modules/%s", rootModuleFQN, f.Name())}) - } - } - return subModules -} - -// restoreModules restores old config as marked by restoreMarker -func restoreModules(f []byte, p string) ([]byte, error) { - if _, err := os.Stat(p); err != nil { - return nil, err - } - strFile := string(f) - if !strings.Contains(strFile, restoreMarker) { - return nil, nil - } - lines 
:= strings.Split(strFile, linebreak) - for i, line := range lines { - if strings.Contains(line, restoreMarker) { - lines[i] = strings.Split(line, restoreMarker)[1] - } - } - return []byte(strings.Join(lines, linebreak)), nil -} - -// replaceLocalModules swaps current local module registry references with local path -func replaceLocalModules(f []byte, p string) ([]byte, error) { - if _, err := os.Stat(p); err != nil { - return nil, err - } - absPath, err := filepath.Abs(filepath.Dir(p)) - if err != nil { - return nil, fmt.Errorf("Error finding example absolute path: %v", err) - } - strFile := string(f) - lines := strings.Split(strFile, linebreak) - for _, localModule := range localModules { - // check if current file has module/submodules references that should be swapped - if !strings.Contains(strFile, localModule.ModuleFQN) { - continue - } - // get relative path from example to local module - newModulePath, err := filepath.Rel(absPath, localModule.Dir) - if err != nil { - return nil, fmt.Errorf("Error finding relative path: %v", err) - } - for i, line := range lines { - if strings.Contains(line, fmt.Sprintf("\"%s\"", localModule.ModuleFQN)) && !strings.Contains(line, restoreMarker) { - // swap with local module and add restore point - leadingWhiteSpace := line[:strings.Index(line, "source")] - newSource := fmt.Sprintf("source = \"%s\"", newModulePath) - lines[i] = leadingWhiteSpace + newSource + fmt.Sprintf(" # %s %s", restoreMarker, line) - // if next line is a version declaration, disable that as well - if i < len(lines)-1 && strings.Contains(lines[i+1], "version") { - leadingWhiteSpace = lines[i+1][:strings.Index(lines[i+1], "version")] - lines[i+1] = fmt.Sprintf("%s# %s %s", leadingWhiteSpace, restoreMarker, lines[i+1]) - } - } - } - } - newExample := strings.Join(lines, linebreak) - // check if any swaps have been made - if newExample == strFile { - return nil, nil - } - // print diff info - log.Printf("Modifications made to file %s", p) - diff := 
difflib.UnifiedDiff{ - A: difflib.SplitLines(strFile), - B: difflib.SplitLines(newExample), - FromFile: "Original", - ToFile: "Modified", - Context: 3, - } - diffInfo, _ := difflib.GetUnifiedDiffString(diff) - log.Println(diffInfo) - return []byte(newExample), nil - -} - -// getTFFiles returns a slice of valid TF file paths -func getTFFiles(path string) []string { - // validate path - if _, err := os.Stat(path); err != nil { - log.Fatal(fmt.Errorf("Unable to find %s : %v", path, err)) - } - var files = make([]string, 0) - filepath.Walk(path, func(path string, info os.FileInfo, err error) error { - if err != nil && info.IsDir() { - return nil - } - isTFFile, _ := filepath.Match(terraformExtension, filepath.Base(path)) - if isTFFile { - files = append(files, path) - } - return nil - }) - return files - -} - -func SwapModules(rootPath, moduleRegistrySuffix, subModulesDir, examplesDir string, restore bool) { - moduleName, moduleRegistryPrefix, err := getModuleNameRegistry(rootPath) - if err != nil { - log.Fatal(err) - } - - // add root module to slice of localModules - localModules = append(localModules, LocalTerraformModule{moduleName, rootPath, fmt.Sprintf("%s/%s/%s", moduleRegistryPrefix, moduleName, moduleRegistrySuffix)}) - examplesPath := fmt.Sprintf("%s/%s", rootPath, examplesDir) - subModulesPath := fmt.Sprintf("%s/%s", rootPath, subModulesDir) - - // add submodules, if any to localModules - submods := findSubModules(subModulesPath, localModules[0].ModuleFQN) - localModules = append(localModules, submods...) 
- - // find all TF files in examples dir to process - exampleTFFiles := getTFFiles(examplesPath) - for _, TFFilePath := range exampleTFFiles { - file, err := ioutil.ReadFile(TFFilePath) - if err != nil { - log.Printf("Error reading file: %v", err) - } - - var newFile []byte - if restore { - newFile, err = restoreModules(file, TFFilePath) - } else { - newFile, err = replaceLocalModules(file, TFFilePath) - } - if err != nil { - log.Printf("Error processing file: %v", err) - } - - if newFile != nil { - err = ioutil.WriteFile(TFFilePath, newFile, 0644) - if err != nil { - log.Printf("Error writing file: %v", err) - } - } - - } - -} diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/swap_test.go b/infra/build/developer-tools/build/scripts/module-swapper/cmd/swap_test.go deleted file mode 100644 index 522f908897b..00000000000 --- a/infra/build/developer-tools/build/scripts/module-swapper/cmd/swap_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package cmd - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "reflect" - "strings" - "testing" - - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/config" -) - -var ( - moduleRegistryPrefix = "terraform-google-modules" - moduleRegistrySuffix = "google" -) - -func getAbsPathHelper(p string) string { - a, err := filepath.Abs(p) - if err != nil { - log.Fatalf("Unable to find absolute path %s: %v", p, err) - } - return a -} - -func getFileHelper(p string) []byte { - f, err := ioutil.ReadFile(p) - if err != nil { - log.Fatalf("Error reading file: %v", err) - } - return f -} - -func setupProcessFileTest(modules []LocalTerraformModule) { - localModules = modules -} - -func tearDownProcessFileTest() { - localModules = []LocalTerraformModule{} -} - -func Test_getTFFiles(t *testing.T) { - type args struct { - path string - } - tests := []struct { - name string - args args - want []string - }{ - {"simple", args{"testdata/example-module-simple"}, 
[]string{"testdata/example-module-simple/examples/example-one/main.tf", "testdata/example-module-simple/examples/main.tf"}}, - {"simple-single-submodule", args{"testdata/example-module-with-submodules/modules/bar-module"}, []string{"testdata/example-module-with-submodules/modules/bar-module/main.tf"}}, - {"simple-single-submodule-empty", args{"testdata/example-module-with-submodules/docs"}, []string{}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := getTFFiles(tt.args.path); !reflect.DeepEqual(got, tt.want) { - t.Errorf("getTFFiles() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_findSubModules(t *testing.T) { - type args struct { - path string - rootModuleFQN string - } - tests := []struct { - name string - args args - want []LocalTerraformModule - }{ - {"simple-no-submodules", args{"testdata/example-module-simple/modules", "terraform-google-modules/example-module-simple/google"}, []LocalTerraformModule{}}, - {"simple-with-submodules", args{"testdata/example-module-with-submodules/modules", "terraform-google-modules/example-module-with-submodules/google"}, - []LocalTerraformModule{ - {"bar-module", filepath.Join(getAbsPathHelper("testdata/example-module-with-submodules/modules"), "bar-module"), "terraform-google-modules/example-module-with-submodules/google//modules/bar-module"}, - {"foo-module", filepath.Join(getAbsPathHelper("testdata/example-module-with-submodules/modules"), "foo-module"), "terraform-google-modules/example-module-with-submodules/google//modules/foo-module"}, - }}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := findSubModules(tt.args.path, tt.args.rootModuleFQN); !reflect.DeepEqual(got, tt.want) { - t.Errorf("findSubModules() = %v, want %v", got, tt.want) - } - }) - } -} - -func getProcessFileTestArgs(p, m string) struct { - f []byte - p string - modules []LocalTerraformModule -} { - f := struct { - f []byte - p string - modules []LocalTerraformModule - }{ - 
getFileHelper(p), - p, - append( - findSubModules("testdata/"+m+"/modules", "terraform-google-modules/"+m+"/google"), - LocalTerraformModule{m, getAbsPathHelper("testdata/" + m), fmt.Sprintf("%s/%s/%s", moduleRegistryPrefix, m, moduleRegistrySuffix)}, - ), - } - return f -} - -func Test_processFile(t *testing.T) { - type args struct { - f []byte - p string - modules []LocalTerraformModule - } - tests := []struct { - name string - args args - want []byte - wantErr bool - }{ - {"simple", getProcessFileTestArgs("testdata/example-module-simple/examples/example-one/main.tf", "example-module-simple"), getFileHelper("testdata/example-module-simple/examples/example-one/main.tf.good"), false}, - {"simple-submodules-single-submod", getProcessFileTestArgs("testdata/example-module-with-submodules/examples/example-one/main.tf", "example-module-with-submodules"), getFileHelper("testdata/example-module-with-submodules/examples/example-one/main.tf.good"), false}, - {"simple-submodules-multiple-modules", getProcessFileTestArgs("testdata/example-module-with-submodules/examples/example-two/main.tf", "example-module-with-submodules"), getFileHelper("testdata/example-module-with-submodules/examples/example-two/main.tf.good"), false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - setupProcessFileTest(tt.args.modules) - var buf bytes.Buffer - log.SetOutput(&buf) - defer func() { - log.SetOutput(os.Stderr) - }() - got, err := replaceLocalModules(tt.args.f, tt.args.p) - t.Log(buf.String()) - if (err != nil) != tt.wantErr { - t.Errorf("processFile() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("processFile() = %v, want %v", string(got), string(tt.want)) - } - tearDownProcessFileTest() - }) - } -} - -func getTempDir() string { - d, err := ioutil.TempDir("", "gitrmtest") - if err != nil { - log.Fatalf("Error creating tempdir: %v", err) - } - return d -} - -func tempGitRepoWithRemote(repoURL, remote string) 
string { - dir := getTempDir() - r, err := git.PlainInit(dir, true) - if err != nil { - log.Fatalf("Error creating repo in tempdir: %v", err) - } - _, err = r.CreateRemote(&config.RemoteConfig{ - Name: remote, - URLs: []string{repoURL}, - }) - if err != nil { - log.Fatalf("Error creating remote in tempdir repo: %v", err) - } - return dir -} - -func Test_getModuleNameRegistry(t *testing.T) { - type args struct { - dir string - } - tests := []struct { - name string - args args - want string - want1 string - wantErr bool - wantErrStr string - }{ - {"simple", args{tempGitRepoWithRemote("https://github.com/foo/terraform-google-bar", "origin")}, "bar", "foo", false, ""}, - {"simple-with-trailing-slash", args{tempGitRepoWithRemote("https://github.com/foo/terraform-google-bar/", "origin")}, "bar", "foo", false, ""}, - {"simple-with-trailing-git", args{tempGitRepoWithRemote("https://github.com/foo/terraform-google-bar.git", "origin")}, "bar", "foo", false, ""}, - {"err-no-remote-origin", args{tempGitRepoWithRemote("https://github.com/foo/terraform-google-bar", "foo")}, "", "", true, ""}, - {"err-not-git-repo", args{getTempDir()}, "", "", true, ""}, - {"err-not-github-repo", args{tempGitRepoWithRemote("https://gitlab.com/foo/terraform-google-bar", "origin")}, "", "", true, "Expected GitHub remote of form https://github.com/ModuleRegistry/ModuleRepo"}, - {"err-not-prefixed-repo", args{tempGitRepoWithRemote("https://github.com/foo/bar", "origin")}, "", "", true, "Expected to find repo name prefixed with terraform-google-"}, - {"err-malformed-remote", args{tempGitRepoWithRemote("https://github.com/footerraform-google-bar", "origin")}, "", "", true, "Expected GitHub org/owner of form ModuleRegistry/ModuleRepo"}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, got1, err := getModuleNameRegistry(tt.args.dir) - if (err != nil) != tt.wantErr { - t.Errorf("getModuleNameRegistry() error = %v, wantErr %v", err, tt.wantErr) - return - } else { - if 
tt.wantErrStr != "" { - if !strings.Contains(err.Error(), tt.wantErrStr) { - t.Errorf("getModuleNameRegistry() error = %v, expected to contain %v", err, tt.wantErrStr) - } - } - } - if got != tt.want { - t.Errorf("getModuleNameRegistry() got = %v, want %v", got, tt.want) - } - if got1 != tt.want1 { - t.Errorf("getModuleNameRegistry() got1 = %v, want %v", got1, tt.want1) - } - }) - } -} diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf b/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf deleted file mode 100644 index 443b8d53821..00000000000 --- a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright 2018 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -provider "google" { - version = "~> 3.0" -} - -# [START cloudrouter_create] -module "cloud_router" { - source = "terraform-google-modules/example-module-simple/google" - version = "~> 0.4" - - name = "my-router" - region = "us-central1" - - bgp = { - # The ASN (16550, 64512 - 65534, 4200000000 - 4294967294) can be any private ASN - # not already used as a peer ASN in the same region and network or 16550 for Partner Interconnect. 
- asn = "65001" - } - - # project = "my-project-id" - project = var.project - # network = "my-network" - network = var.network -} -# [END cloudrouter_create] - -# Unrelated module -module "vpc" { - source = "terraform-google-modules/network/google" - version = "~> 2.0.0" - network_name = "example-vpc" - - routes = [ - { - name = "egress-internet" - description = "route through IGW to access internet" - destination_range = "0.0.0.0/0" - tags = "egress-inet" - next_hop_internet = "true" - }, - { - name = "app-proxy" - description = "route through proxy to reach app" - destination_range = "10.50.10.0/24" - tags = "app-proxy" - next_hop_instance = "app-proxy-instance" - next_hop_instance_zone = "us-west1-a" - }, - ] -} diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf.good b/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf.good deleted file mode 100644 index 2bc99efedda..00000000000 --- a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf.good +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright 2018 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -provider "google" { - version = "~> 3.0" -} - -# [START cloudrouter_create] -module "cloud_router" { - source = "../.." 
# [restore-marker] source = "terraform-google-modules/example-module-simple/google" - # [restore-marker] version = "~> 0.4" - - name = "my-router" - region = "us-central1" - - bgp = { - # The ASN (16550, 64512 - 65534, 4200000000 - 4294967294) can be any private ASN - # not already used as a peer ASN in the same region and network or 16550 for Partner Interconnect. - asn = "65001" - } - - # project = "my-project-id" - project = var.project - # network = "my-network" - network = var.network -} -# [END cloudrouter_create] - -# Unrelated module -module "vpc" { - source = "terraform-google-modules/network/google" - version = "~> 2.0.0" - network_name = "example-vpc" - - routes = [ - { - name = "egress-internet" - description = "route through IGW to access internet" - destination_range = "0.0.0.0/0" - tags = "egress-inet" - next_hop_internet = "true" - }, - { - name = "app-proxy" - description = "route through proxy to reach app" - destination_range = "10.50.10.0/24" - tags = "app-proxy" - next_hop_instance = "app-proxy-instance" - next_hop_instance_zone = "us-west1-a" - }, - ] -} diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-one/main.tf.good b/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-one/main.tf.good deleted file mode 100644 index 80b6b812634..00000000000 --- a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-one/main.tf.good +++ /dev/null @@ -1,27 +0,0 @@ -module "test-module" { - source = "../.." 
# [restore-marker] source = "terraform-google-modules/example-module-with-submodules/google" - # [restore-marker] version = "~> 3.2.0" - - project_id = var.project_id # Replace this with your project ID in quotes - network_name = "my-custom-mode-network" - mtu = 1460 -} - -module "test-submodule-module" { - source = "../../modules/bar-module" # [restore-marker] source = "terraform-google-modules/example-module-with-submodules/google//modules/bar-module" - # [restore-marker] version = "~> 3.2.0" - - project_id = var.project_id # Replace this with your project ID in quotes - network_name = "my-custom-mode-network" - mtu = 1460 -} - -# Unrelated submodule -module "test-unrelated-submodule-module" { - source = "terraform-google-modules/foo/google" - version = "~> 3.2.0" - - project_id = var.project_id # Replace this with your project ID in quotes - network_name = "my-custom-mode-network" - mtu = 1460 -} diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-two/main.tf.good b/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-two/main.tf.good deleted file mode 100644 index baf29e494af..00000000000 --- a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-two/main.tf.good +++ /dev/null @@ -1,46 +0,0 @@ -# Unrelated module -module "test-unrelated-submodule" { - source = "terraform-google-modules/foo/google" - version = "~> 3.2.0" - - project_id = var.project_id # Replace this with your project ID in quotes - network_name = "my-custom-mode-network" - mtu = 1460 -} - -module "test-module" { - source = "../.." 
# [restore-marker] source = "terraform-google-modules/example-module-with-submodules/google" - # [restore-marker] version = "~> 3.2.0" - - project_id = var.project_id # Replace this with your project ID in quotes - network_name = "my-custom-mode-network" - mtu = 1460 -} - -module "test-submodule-1" { - source = "../../modules/bar-module" # [restore-marker] source = "terraform-google-modules/example-module-with-submodules/google//modules/bar-module" - # [restore-marker] version = "~> 3.2.0" - - project_id = var.project_id # Replace this with your project ID in quotes - network_name = "my-custom-mode-network" - mtu = 1460 -} - -module "test-submodule-2" { - source = "../../modules/foo-module" # [restore-marker] source = "terraform-google-modules/example-module-with-submodules/google//modules/foo-module" - # [restore-marker] version = "~> 3.2.0" - - project_id = var.project_id # Replace this with your project ID in quotes - network_name = "my-custom-mode-network" - mtu = 1460 -} - -# Unrelated submodule -module "test-unrelated-submodule" { - source = "terraform-google-modules/foo/google//modules/bar-module" - version = "~> 3.2.0" - - project_id = var.project_id # Replace this with your project ID in quotes - network_name = "my-custom-mode-network" - mtu = 1460 -} diff --git a/infra/build/developer-tools/build/scripts/module-swapper/go.mod b/infra/build/developer-tools/build/scripts/module-swapper/go.mod deleted file mode 100644 index b371babc482..00000000000 --- a/infra/build/developer-tools/build/scripts/module-swapper/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/GoogleCloudPlatform/infra/developer-tools/build/scripts/module-swapper - -go 1.14 - -require ( - github.com/go-git/go-git/v5 v5.3.0 - github.com/pmezard/go-difflib v1.0.0 -) diff --git a/infra/build/developer-tools/build/scripts/module-swapper/go.sum b/infra/build/developer-tools/build/scripts/module-swapper/go.sum deleted file mode 100644 index 11431b97da2..00000000000 --- 
a/infra/build/developer-tools/build/scripts/module-swapper/go.sum +++ /dev/null @@ -1,93 +0,0 @@ -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-git/gcfg 
v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.1.0 h1:4pl5BV4o7ZG/lterP4S6WzJ6xr49Ba5ET9ygheTYahk= -github.com/go-git/go-billy/v5 v5.1.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= -github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git/v5 v5.3.0 h1:8WKMtJR2j8RntEXR/uvTKagfEt4GYlwQ7mntE4+0GWc= -github.com/go-git/go-git/v5 v5.3.0/go.mod h1:xdX4bWJ48aOrdhnl2XqHYstHbbp6+LFS4r4X+lNVprw= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 
h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= -github.com/xanzy/ssh-agent 
v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210326060303-6b1517762897 h1:KrsHThm5nFk34YtATK1LsThyGhGbGe1olrte/HInHvs= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/infra/build/developer-tools/build/scripts/module-swapper/main.go b/infra/build/developer-tools/build/scripts/module-swapper/main.go deleted file mode 100644 index 474ee47ad2a..00000000000 --- a/infra/build/developer-tools/build/scripts/module-swapper/main.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "flag" - "log" - "os" - - "github.com/GoogleCloudPlatform/infra/developer-tools/build/scripts/module-swapper/cmd" -) - -func main() { - workDir := flag.String("workdir", "", "Absolute path to root module where examples should be swapped. Defaults to working directory") - subModulesDir := flag.String("submods-path", "modules", "Path to a submodules if any that maybe referenced. Defaults to working dir/modules") - examplesDir := flag.String("examples-path", "examples", "Path to examples that should be swapped. 
Defaults to cwd/examples") - moduleRegistrySuffix := flag.String("registry-suffix", "google", "Module registry suffix") - restore := flag.Bool("restore", false, "Restores disabled modules") - flag.Parse() - rootPath := *workDir - // if no workDir specified default to current working directory - if rootPath == "" { - cwd, err := os.Getwd() - if err != nil { - log.Fatalf("Unable to get cwd: %v", err) - } - rootPath = cwd - } - cmd.SwapModules(rootPath, *moduleRegistrySuffix, *subModulesDir, *examplesDir, *restore) -} diff --git a/infra/build/developer-tools/build/scripts/task_helper_functions.sh b/infra/build/developer-tools/build/scripts/task_helper_functions.sh index 7915d9ff357..f4f40dc8745 100755 --- a/infra/build/developer-tools/build/scripts/task_helper_functions.sh +++ b/infra/build/developer-tools/build/scripts/task_helper_functions.sh @@ -18,7 +18,7 @@ # setup_trap_handler() and used by maketemp() finish() { if [[ -n "${DELETE_AT_EXIT:-}" ]]; then - rm -rf "${DELETE_AT_EXIT}" + rm -rf "$DELETE_AT_EXIT" fi } @@ -27,7 +27,10 @@ finish() { # for use with maketemp() to automatically clean up temporary files, especially # those used to store credentials. setup_trap_handler() { - readonly DELETE_AT_EXIT="$(mktemp -d)" + if [[ -z "${DELETE_AT_EXIT+x}" ]]; then + DELETE_AT_EXIT="$(mktemp -d)" + readonly DELETE_AT_EXIT + fi trap finish EXIT } @@ -74,6 +77,11 @@ find_files() { ".*/.*\.jpg" ".*/.*\.jpeg" ".*/.*\.svg" + ".*/.*\.ico" + ".*/.*\.jar" + ".*/.*\.parquet" + ".*/.*\.pb" + ".*/.*\.index" "\./autogen" "\./test/fixtures/all_examples" "\./test/fixtures/shared" @@ -147,8 +155,8 @@ function lint_docker() { # This function creates TF_PLUGIN_CACHE_DIR if TF_PLUGIN_CACHE_DIR envvar is set function init_tf_plugin_cache() { - if [[ ! 
-z "${TF_PLUGIN_CACHE_DIR}" ]]; then - mkdir -p ${TF_PLUGIN_CACHE_DIR} + if [[ -n "$TF_PLUGIN_CACHE_DIR" ]]; then + mkdir -p "$TF_PLUGIN_CACHE_DIR" fi } @@ -232,7 +240,7 @@ check_whitespace() { local rc echo "Checking for trailing whitespace" find_files . -print \ - | grep -v -E '\.(pyc|png|gz|tfvars)$' \ + | grep -v -E '\.(pyc|png|gz|swp|tfvars|mp4|zip|ico|jar|parquet|pb|index)$' \ | compat_xargs grep -H -n '[[:blank:]]$' rc=$? if [[ ${rc} -eq 0 ]]; then @@ -243,7 +251,7 @@ fi echo "Checking for missing newline at end of file" find_files . -print \ - | grep -v -E '\.(png|gz|tfvars)$' \ + | grep -v -E '\.(png|gz|tfvars|mp4|zip|ico|jar|parquet|pb|index)$' \ | compat_xargs check_eof_newline return $((rc+$?)) } @@ -305,6 +313,93 @@ function generate_docs() { done < <(find_files . -name '*.tf' -print0 \ | compat_xargs -0 -n1 dirname \ | sort -u) + + # disable opt in after https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1353 + if [[ "${ENABLE_BPMETADATA:-}" -ne 1 ]]; then + echo "ENABLE_BPMETADATA not set to 1. Skipping metadata generation." + return 0 + fi + generate_metadata "${1-default}" +} + +function generate_metadata() { + echo "Generating blueprint metadata" + arg=${1-default} + # check if metadata was requested with parameters + if [ "${arg}" = "default" ]; then + cft blueprint metadata + elif [ "${arg}" = "display" ]; then + cft blueprint metadata -d + else + eval "cft blueprint metadata $arg" + fi + + if [ $? -ne 0 ]; then + echo "Warning! Unable to generate metadata." + return 1 + fi + # add headers since comments are not preserved with metadata generation + # TODO: b/260869608 + fix_headers +} + +function check_metadata() { + if [[ "${ENABLE_BPMETADATA:-}" -ne 1 ]]; then + echo "ENABLE_BPMETADATA not set to 1. Skipping metadata validation." + return 0 + fi + + echo "Validating blueprint metadata" + cft blueprint metadata -v + + if [ $? -eq 0 ]; then + echo "Success!" + else + echo "Warning! 
Unable to validate metadata." + fi +} + +function check_tflint() { + if [[ "${DISABLE_TFLINT:-}" ]]; then + echo "DISABLE_TFLINT set. Skipping tflint check." + return 0 + fi + local rval + setup_trap_handler + rval=0 + echo "Checking for tflint" + local path + while read -r path; do + local tflintCfg + # skip any tf configs under test/ + if [[ $path == "./test"* ]];then + echo "Skipping ${path}" + continue + fi + # load default ruleset + tflintCfg="/root/tflint/.tflint.example.hcl" + # load if local repo ruleset + if [[ -f "/workspace/.github/.tflint.repo.hcl" ]]; then + tflintCfg="/workspace/.github/.tflint.repo.hcl" + # if module, load tighter ruleset + elif [[ $path == "." || $path == "./modules"* || $path =~ "^[0-9]+-.*" ]]; then + tflintCfg="/root/tflint/.tflint.module.hcl" + fi + + cd "${path}" && echo "Working in ${path} using ${tflintCfg}..." + tflint --config=${tflintCfg} --no-color + rc=$? + if [[ "${rc}" -ne 0 ]]; then + echo "tflint failed ${path} " + ((rval++)) + else + echo "tflint passed ${path} " + fi + cd - >/dev/null + done < <(find_files . -name '*.tf' -print0 \ + | compat_xargs -0 -n1 dirname \ + | sort -u) + return $((rval)) } # Lint check to determine whether generate_docs() needs to be run by copying to @@ -318,18 +413,18 @@ function check_documentation() { rsync -axh \ --exclude '*/.terraform' \ --exclude '*/.kitchen' \ - --exclude '*/.git' \ --exclude 'autogen' \ --exclude '*/.tfvars' \ /workspace "${tempdir}" >/dev/null 2>/dev/null - cd "${tempdir}" + cd "${tempdir}/workspace" generate_docs >/dev/null 2>/dev/null + # TODO: (b/261241276) preserve version no. for release PR diff -r \ --exclude=".terraform" \ --exclude=".kitchen" \ - --exclude=".git" \ --exclude="autogen" \ --exclude="*.tfvars" \ + --exclude="*metadata.yaml" \ /workspace "${tempdir}/workspace" rc=$? if [[ "${rc}" -ne 0 ]]; then @@ -347,6 +442,11 @@ function generate_modules() { if [[ -e /workspace/autogen_modules.json ]]; then autogen_modules=$(jq '.' 
/workspace/autogen_modules.json) python3 /usr/local/bin/generate_modules.py "$autogen_modules" + + # formatting the generated modules since formatting does not apply + # to jinja templates + echo "Running terraform fmt" + terraform fmt -recursive fi } @@ -372,7 +472,6 @@ function check_generate_modules() { rsync -axh \ --exclude '*/.terraform' \ --exclude '*/.kitchen' \ - --exclude '*/.git' \ /workspace "${tempdir}" >/dev/null 2>/dev/null cd "${tempdir}/workspace" || exit 1 generate_modules >/dev/null 2>/dev/null @@ -423,9 +522,9 @@ function fix_headers() { YEAR=$(date +'%Y') if [ $# -eq 0 ] then - find_files . for_header_check -type f -print0 | compat_xargs -0 addlicense -y $YEAR + find_files . for_header_check -type f -print0 | compat_xargs -0 addlicense -y "$YEAR" else - addlicense -y $YEAR "$@" + addlicense -y "$YEAR" "$@" fi } @@ -512,7 +611,7 @@ setup_environment() { source_test_env() { if [ -d test/setup ]; then # shellcheck disable=SC1091 - source <(python /usr/local/bin/export_tf_outputs.py --path=test/setup) + source <(python3 /usr/local/bin/export_tf_outputs.py --path=test/setup) else if [ -f test/source.sh ]; then echo "Warning: test/setup not found. Will only use test/source.sh to configure environment." 
@@ -604,7 +703,7 @@ run_terraform_validator() { terraform plan -input=false -out "$tmp_plan/plan.tfplan" || exit 1 terraform show -json "$tmp_plan/plan.tfplan" > "$tmp_plan/plan.json" || exit 1 - terraform-validator validate "$tmp_plan/plan.json" --policy-path="$policy_file_path" --project="$project" || exit 1 + gcloud beta terraform vet "$tmp_plan/plan.json" --policy-library="$policy_file_path" --project="$project" || exit 1 cd "$base_dir" || exit else diff --git a/infra/build/developer-tools/build/scripts/task_wrapper_scripts/lint_docker b/infra/build/developer-tools/build/scripts/task_wrapper_scripts/lint_docker index 105e22293fc..021b520975c 100755 --- a/infra/build/developer-tools/build/scripts/task_wrapper_scripts/lint_docker +++ b/infra/build/developer-tools/build/scripts/task_wrapper_scripts/lint_docker @@ -14,4 +14,3 @@ # limitations under the License. source /usr/local/bin/task_helper_functions.sh && lint_docker - diff --git a/infra/build/developer-tools/build/scripts/task_wrapper_scripts/terraform_docs.sh b/infra/build/developer-tools/build/scripts/task_wrapper_scripts/terraform_docs.sh index 74aaafb1f0e..59173ee2800 100755 --- a/infra/build/developer-tools/build/scripts/task_wrapper_scripts/terraform_docs.sh +++ b/infra/build/developer-tools/build/scripts/task_wrapper_scripts/terraform_docs.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright 2019 Google LLC +# Copyright 2019-2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -62,7 +62,6 @@ terraform_docs() { ((index+=1)) done - readonly tmp_file=$(mktemp) readonly text_file="README.md" for path_uniq in $(echo "${paths[*]}" | tr ' ' '\n' | sort -u); do @@ -75,15 +74,7 @@ terraform_docs() { continue fi - terraform-docs --hide-all --show inputs --show outputs $args md ./ > "$tmp_file" - - # Replace content between markers with the placeholder - https://stackoverflow.com/questions/1212799/how-do-i-extract-lines-between-two-line-delimiters-in-perl#1212834 - perl -i -ne 'if (/BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK/../END OF PRE-COMMIT-TERRAFORM DOCS HOOK/) { print $_ if /BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK/; print "I_WANT_TO_BE_REPLACED\n$_" if /END OF PRE-COMMIT-TERRAFORM DOCS HOOK/;} else { print $_ }' "$text_file" - - # Replace placeholder with the content of the file - perl -i -e 'open(F, "'"$tmp_file"'"); $f = join "", ; while(<>){if (/I_WANT_TO_BE_REPLACED/) {print $f} else {print $_};}' "$text_file" - - rm -f "$tmp_file" + terraform-docs --output-template "\n{{ .Content }}\n\n" --anchor=false --show inputs --show outputs --output-file=$text_file $args md ./ popd > /dev/null done diff --git a/infra/build/developer-tools/build/scripts/task_wrapper_scripts/terraform_validate b/infra/build/developer-tools/build/scripts/task_wrapper_scripts/terraform_validate index 42df9ac62b3..172ba205279 100755 --- a/infra/build/developer-tools/build/scripts/task_wrapper_scripts/terraform_validate +++ b/infra/build/developer-tools/build/scripts/task_wrapper_scripts/terraform_validate @@ -21,6 +21,6 @@ set -eu curdir=$(pwd) cd "${1}" -terraform init -backend=false >/dev/null -terraform validate +flock -x /workspace/.terraform.lock -c "terraform init -backend=false >/dev/null" +flock -s /workspace/.terraform.lock -c "terraform validate" cd "$curdir" diff --git a/infra/build/developer-tools/build/scripts/test_lint.sh b/infra/build/developer-tools/build/scripts/test_lint.sh index bd24e8e56ae..9a6d8af2f97 100755 --- 
a/infra/build/developer-tools/build/scripts/test_lint.sh +++ b/infra/build/developer-tools/build/scripts/test_lint.sh @@ -1,5 +1,5 @@ #! /bin/bash -# Copyright 2019 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,19 +25,8 @@ source /usr/local/bin/task_helper_functions.sh set +e # constants -MARKDOWN=0 -MARKDOWN_STR="" -CONTRIBUTING_GUIDE="" -# shellcheck disable=SC2089,SC2016 # Quotes/backslashes will be treated literally, expressions don't expand -messages='{ - "check_generate_modules": "The modules need to be regenerated. Please run `make_build`.", - "check_documentation": "The documentation needs to be regenerated. Please run `make generate_docs`.", - "check_whitespace": "Failed whitespace check. More details below.", - "check_shell": "Failed shell check. More info on running shellcheck locally [here](https://www.shellcheck.net).", - "check_headers": "All files need a license header. Please make sure all your files include the appropriate header. A helper tool available [here](https://github.com/google/addlicense).", - "check_python": "Failed flake8 Python lint check.", - "check_terraform": "Failed Terraform check. More details below." -}' +# Store working dir as module-swapper needs to be executed in this dir. 
+CWD="${PWD}" rval=0 failed_tests=() tests=( @@ -47,58 +36,32 @@ tests=( check_shell check_headers check_python + check_tflint check_terraform + check_metadata ) -# parse args -for arg in "$@" -do - case $arg in - -m|--markdown) - MARKDOWN=1 - shift - ;; - -c=*|--contrib-guide=*) - CONTRIBUTING_GUIDE="${arg#*=}" - shift - ;; - *) # end argument parsing - shift - ;; - esac -done +if [[ -z "${DISABLE_MODULE_SWAPPER:-}" ]]; then + module-swapper +fi + +function restore { + cd "${CWD}" + module-swapper -restore >/dev/null 2>&1 + exit 1 +} +trap restore INT for test in "${tests[@]}"; do - # if not in markdown mode, pipe test output to stdout tty - # nested if condition is a workaround for test[[]] not echoing some outputs from check_* tests even with subshell - if [[ $MARKDOWN -eq 0 ]]; then - if ! "${test}"; then - failed_tests+=("${test}") - ((rval++)) - fi - # if control reaches here - in markdown mode, pipe test stderr to stdout for capture - elif ! output=$(${test} 2>&1); then - # add test name to list of failed_tests + if ! "${test}"; then failed_tests+=("${test}") ((rval++)) - # clean output color, sqash multiple empty blank lines - output=$(echo "$output" | sed -r "s/\x1b\[[0-9;]*m/\n/g" | tr -s '\n') - # try to get a helpful error message, otherwise unknown - error_help_message=$(echo "$messages" | jq --arg check_name "$test" -r '.[$check_name] // "đŸĻ– An unknown error has occurred" ') - #construct markdown body - MARKDOWN_STR+="- âš ī¸${test}\n ${error_help_message} \n \`\`\`bash \n${output}\n \`\`\` \n" fi done +module-swapper -restore >/dev/null 2>&1 -# if any tests have failed if [[ "${#failed_tests[@]}" -ne 0 ]]; then - # echo output in markdown - if [[ $MARKDOWN -eq 1 ]]; then - header="Thanks for the PR! 🚀\nUnfortunately it looks like some of our CI checks failed. 
See the [Contributing Guide](${CONTRIBUTING_GUIDE}) for details.\n" - echo -e "${header}${MARKDOWN_STR}" - else - # shellcheck disable=SC2145 # Output all elements of the array - echo "Error: The following tests have failed: ${failed_tests[@]}" - exit "${rval}" - fi + # shellcheck disable=SC2145 # Output all elements of the array + echo "Error: The following tests have failed: ${failed_tests[@]}" + exit "${rval}" fi diff --git a/infra/build/developer-tools/build/verify_boilerplate/verify_boilerplate.py b/infra/build/developer-tools/build/verify_boilerplate/verify_boilerplate.py index 14d3fe13c00..dfc157af4bb 100755 --- a/infra/build/developer-tools/build/verify_boilerplate/verify_boilerplate.py +++ b/infra/build/developer-tools/build/verify_boilerplate/verify_boilerplate.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright 2019 Google LLC # diff --git a/infra/concourse/Makefile b/infra/concourse/Makefile deleted file mode 100644 index eea12731efa..00000000000 --- a/infra/concourse/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -FLY_CMD := "fly" -PIPELINE_CONFS := $(wildcard ./pipelines/*.yml) -PIPELINES := $(foreach CONF, $(PIPELINE_CONFS), $(subst terraform-google-,,$(basename $(notdir $(CONF))))) - -.PHONY: $(PIPELINES) -$(PIPELINES): - $(FLY_CMD) -t cft set-pipeline \ - -p terraform-google-$@ \ - -c pipelines/terraform-google-$@.yml \ - -l vars/phoogle.yml - -check-%: - $(FLY_CMD) -t cft validate-pipeline -s \ - -c pipelines/terraform-google-$*.yml \ - -l vars/phoogle.yml - -POSTGRES_IP := 10.45.0.3 - -.PHONY: psql -psql: - kubectl run -i --tty --attach psql --image=postgres:9.6-alpine -- \ - psql -h ${POSTGRES_IP} -U concourse -d atc - kubectl delete deployment psql - -include ./Makefile.BUILD diff --git a/infra/concourse/Makefile.BUILD b/infra/concourse/Makefile.BUILD deleted file mode 100644 index 14cd0a12098..00000000000 --- a/infra/concourse/Makefile.BUILD +++ /dev/null @@ -1,80 +0,0 @@ -SHELL := /usr/bin/env bash # Make will use bash 
instead of sh - -BUILD_TERRAFORM_VERSION := 0.12.3 -BUILD_CLOUD_SDK_VERSION := 239.0.0 -BUILD_PROVIDER_GOOGLE_VERSION := 2.7.0 -BUILD_PROVIDER_GSUITE_VERSION := 0.1.22 -BUILD_RUBY_VERSION := 2.6.3 -# Make sure you update DOCKER_TAG_VERSION_TERRAFORM or DOCKER_TAG_VERSION_KITCHEN_TERRAFORM independently: -# If you make changes to the Docker.terraform file, update DOCKER_TAG_VERSION_TERRAFORM -# If you make changes to the Docker.kitchen-terraform file, update DOCKER_TAG_VERSION_KITCHEN_TERRAFORM -# Also make sure to update the version appropriately as described below -# Removing software components or upgrading a component to a backwards incompatible release should constitute a major release. -# Adding a component or upgrading a component to a backwards compatible release should constitute a minor release. -# Fixing bugs or making trivial changes should be considered a patch release. - -DOCKER_TAG_VERSION_TERRAFORM := 2.1.0 -DOCKER_TAG_VERSION_KITCHEN_TERRAFORM := 2.3.0 - -REGISTRY_URL := gcr.io/cloud-foundation-cicd - -DOCKER_IMAGE_LINT := cft/lint -DOCKER_TAG_LINT := 2.4.0 - -DOCKER_IMAGE_UNIT := cft/unit -DOCKER_TAG_UNIT := latest - -DOCKER_IMAGE_TERRAFORM := cft/terraform -DOCKER_IMAGE_KITCHEN_TERRAFORM := cft/kitchen-terraform - -.PHONY: build-image-lint -build-image-lint: - docker build -f build/Dockerfile.lint \ - --build-arg BUILD_TERRAFORM_VERSION=${BUILD_TERRAFORM_VERSION} \ - --build-arg BUILD_PROVIDER_GSUITE_VERSION=${BUILD_PROVIDER_GSUITE_VERSION} \ - -t ${DOCKER_IMAGE_LINT}:${DOCKER_TAG_LINT} . - -.PHONY: build-image-unit -build-image-unit: - docker build -f build/Dockerfile.unit \ - -t ${DOCKER_IMAGE_UNIT}:${DOCKER_TAG_UNIT} . 
- -.PHONY: build-image-terraform -build-image-terraform: - docker build -f build/Dockerfile.terraform \ - --build-arg BUILD_TERRAFORM_VERSION=${BUILD_TERRAFORM_VERSION} \ - --build-arg BUILD_CLOUD_SDK_VERSION=${BUILD_CLOUD_SDK_VERSION} \ - --build-arg BUILD_PROVIDER_GOOGLE_VERSION=${BUILD_PROVIDER_GOOGLE_VERSION} \ - --build-arg BUILD_PROVIDER_GSUITE_VERSION=${BUILD_PROVIDER_GSUITE_VERSION} \ - -t ${DOCKER_IMAGE_TERRAFORM}:${DOCKER_TAG_VERSION_TERRAFORM} . - -.PHONY: build-image-kitchen-terraform -build-image-kitchen-terraform: - docker build -f build/Dockerfile.kitchen-terraform \ - --build-arg BUILD_TERRAFORM_IMAGE=${REGISTRY_URL}/${DOCKER_IMAGE_TERRAFORM}:${DOCKER_TAG_VERSION_TERRAFORM} \ - --build-arg BUILD_RUBY_VERSION=${BUILD_RUBY_VERSION} \ - -t ${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_VERSION_KITCHEN_TERRAFORM} . - -.PHONY: release-image-lint -release-image-lint: - docker tag ${DOCKER_IMAGE_LINT}:${DOCKER_TAG_LINT} \ - ${REGISTRY_URL}/${DOCKER_IMAGE_LINT}:${DOCKER_TAG_LINT} - docker push ${REGISTRY_URL}/${DOCKER_IMAGE_LINT}:${DOCKER_TAG_LINT} - -.PHONY: release-image-unit -release-image-unit: - docker tag ${DOCKER_IMAGE_UNIT}:${DOCKER_TAG_UNIT} \ - ${REGISTRY_URL}/${DOCKER_IMAGE_UNIT}:${DOCKER_TAG_UNIT} - docker push ${REGISTRY_URL}/${DOCKER_IMAGE_UNIT}:${DOCKER_TAG_UNIT} - -.PHONY: release-image-terraform -release-image-terraform: - docker tag ${DOCKER_IMAGE_TERRAFORM}:${DOCKER_TAG_VERSION_TERRAFORM} \ - ${REGISTRY_URL}/${DOCKER_IMAGE_TERRAFORM}:${DOCKER_TAG_VERSION_TERRAFORM} - docker push ${REGISTRY_URL}/${DOCKER_IMAGE_TERRAFORM}:${DOCKER_TAG_VERSION_TERRAFORM} - -.PHONY: release-image-kitchen-terraform -release-image-kitchen-terraform: - docker tag ${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_VERSION_KITCHEN_TERRAFORM} \ - ${REGISTRY_URL}/${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_VERSION_KITCHEN_TERRAFORM} - docker push ${REGISTRY_URL}/${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_VERSION_KITCHEN_TERRAFORM} diff --git 
a/infra/concourse/build/Dockerfile.kitchen-terraform b/infra/concourse/build/Dockerfile.kitchen-terraform deleted file mode 100644 index 3faddd88fef..00000000000 --- a/infra/concourse/build/Dockerfile.kitchen-terraform +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ARG BUILD_TERRAFORM_IMAGE -ARG BUILD_RUBY_VERSION -FROM $BUILD_TERRAFORM_IMAGE as cft-terraform - -FROM ruby:$BUILD_RUBY_VERSION-alpine - -RUN apk add --no-cache \ - bash \ - coreutils \ - curl \ - git \ - g++ \ - jq \ - make \ - musl-dev \ - openssh \ - python \ - python3 \ - ca-certificates - -SHELL ["/bin/bash", "-c"] - -ENV APP_BASE_DIR="/cft" - -RUN cd /tmp && \ - wget https://releases.hashicorp.com/packer/1.4.1/packer_1.4.1_linux_amd64.zip && \ - unzip packer_1.4.1_linux_amd64.zip && \ - rm -rf packer_1.4.1_linux_amd64.zip && \ - mv packer /bin/ - -ADD https://storage.googleapis.com/kubernetes-release/release/v1.12.2/bin/linux/amd64/kubectl /usr/local/bin/kubectl -RUN chmod +x /usr/local/bin/kubectl - -COPY --from=cft-terraform $APP_BASE_DIR $APP_BASE_DIR - -ENV HOME="$APP_BASE_DIR/home" -ENV PATH $APP_BASE_DIR/bin:$APP_BASE_DIR/google-cloud-sdk/bin:$PATH -ENV GOOGLE_APPLICATION_CREDENTIALS="$CREDENTIALS_PATH" \ - CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="$CREDENTIALS_PATH" - -# Fix base64 inconsistency -SHELL ["/bin/bash", "-c"] -RUN echo 'base64() { if [[ $@ == "--decode" ]]; then command base64 -d | more; else command base64 "$@"; fi; }' 
>> $APP_BASE_DIR/home/.bashrc - -RUN terraform --version && \ - gcloud --version && \ - ruby --version && \ - bundle --version && \ - packer --version && \ - kubectl version --client=true - -WORKDIR /opt/kitchen -ADD ./build/data/Gemfile . -ADD ./build/data/Gemfile.lock . -ADD ./build/data/requirements.txt . -RUN bundle install && pip3 install -r requirements.txt - -WORKDIR $APP_BASE_DIR/workdir - -RUN gcloud components install beta --quiet -RUN gcloud components install alpha --quiet - -# Authenticate gcloud with service account credentials key to allow gsutil authentication -ADD ./build/scripts/gcloud_auth.sh $HOME/entrypoint_scripts/ -RUN chmod +x $HOME/entrypoint_scripts/gcloud_auth.sh -ENTRYPOINT ["/cft/home/entrypoint_scripts/gcloud_auth.sh"] diff --git a/infra/concourse/build/Dockerfile.lint b/infra/concourse/build/Dockerfile.lint deleted file mode 100644 index e36a2ded852..00000000000 --- a/infra/concourse/build/Dockerfile.lint +++ /dev/null @@ -1,49 +0,0 @@ -FROM alpine:3.8 - -RUN apk add --no-cache --update \ - bash \ - coreutils \ - findutils \ - make \ - go=1.10.8-r0 \ - python=2.7.15-r1 \ - python3 \ - py-pip=10.0.1-r0 \ - grep \ - git \ - perl - -RUN pip install flake8 jinja2 - -RUN wget https://shellcheck.storage.googleapis.com/shellcheck-v0.6.0.linux.x86_64.tar.xz && \ - tar -xf shellcheck-v0.6.0.linux.x86_64.tar.xz && \ - mv shellcheck-v0.6.0/shellcheck /usr/local/bin/ && \ - rm -r shellcheck-v0.6.0 shellcheck-v0.6.0.linux.x86_64.tar.xz -ARG BUILD_TERRAFORM_VERSION -ENV TERRAFORM_VERSION="${BUILD_TERRAFORM_VERSION}" - -RUN wget "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" && \ - unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \ - rm terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \ - mv terraform /usr/local/bin/ - -RUN wget https://github.com/hadolint/hadolint/releases/download/v1.15.0/hadolint-Linux-x86_64 && \ - chmod +x hadolint-Linux-x86_64 && \ - mv 
hadolint-Linux-x86_64 /usr/local/bin/hadolint - -RUN wget https://github.com/segmentio/terraform-docs/releases/download/v0.6.0/terraform-docs-v0.6.0-linux-amd64 && \ - mv terraform-docs* /usr/local/bin/terraform-docs && \ - chmod 0755 /usr/local/bin/terraform-docs - -RUN wget https://raw.githubusercontent.com/antonbabenko/pre-commit-terraform/master/terraform_docs.sh && \ - mv terraform_docs.sh /usr/local/bin/terraform_docs.sh && \ - chmod 0755 /usr/local/bin/terraform_docs.sh - -ARG BUILD_PROVIDER_GSUITE_VERSION -ENV PROVIDER_GSUITE_VERSION="${BUILD_PROVIDER_GSUITE_VERSION}" - -RUN wget "https://github.com/DeviaVir/terraform-provider-gsuite/releases/download/v${PROVIDER_GSUITE_VERSION}/terraform-provider-gsuite_${PROVIDER_GSUITE_VERSION}_linux_amd64.tgz" && \ - tar xzf terraform-provider-gsuite_${PROVIDER_GSUITE_VERSION}_linux_amd64.tgz && \ - rm terraform-provider-gsuite_${PROVIDER_GSUITE_VERSION}_linux_amd64.tgz && \ - install -m 0755 -d ~/.terraform.d/plugins/ && \ - mv terraform-provider-gsuite_v${PROVIDER_GSUITE_VERSION} ~/.terraform.d/plugins/ diff --git a/infra/concourse/build/Dockerfile.terraform b/infra/concourse/build/Dockerfile.terraform deleted file mode 100644 index 9455e93cefa..00000000000 --- a/infra/concourse/build/Dockerfile.terraform +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -FROM alpine:3.8 as builder - -RUN apk add --no-cache \ - bash \ - git \ - go \ - make \ - musl-dev - -ENV APP_BASE_DIR="/cft" - -RUN mkdir -p $APP_BASE_DIR/home && \ - mkdir -p $APP_BASE_DIR/bin && \ - mkdir -p $APP_BASE_DIR/workdir - -ENV GOPATH="/root/go" - -ARG BUILD_PROVIDER_GOOGLE_VERSION -ENV PROVIDER_GOOGLE_VERSION="${BUILD_PROVIDER_GOOGLE_VERSION}" - -RUN mkdir -p $APP_BASE_DIR/home/.terraform.d/plugins && \ - mkdir -p $GOPATH/src/github.com/terraform-providers && \ - cd $GOPATH/src/github.com/terraform-providers && \ - git clone https://github.com/terraform-providers/terraform-provider-google.git && \ - cd terraform-provider-google && \ - git fetch --all --tags --prune && \ - git checkout tags/v${PROVIDER_GOOGLE_VERSION} -b v${PROVIDER_GOOGLE_VERSION} && \ - make fmt && \ - make build && \ - mv $GOPATH/bin/terraform-provider-google \ - $APP_BASE_DIR/home/.terraform.d/plugins/terraform-provider-google_v${PROVIDER_GOOGLE_VERSION} - -FROM alpine:3.8 - -RUN apk add --no-cache \ - bash \ - curl \ - git \ - jq \ - make \ - python2 - -ENV APP_BASE_DIR="/cft" - -COPY --from=builder $APP_BASE_DIR $APP_BASE_DIR - -ENV HOME="$APP_BASE_DIR/home" -ENV PATH $APP_BASE_DIR/bin:$APP_BASE_DIR/google-cloud-sdk/bin:$PATH -ENV GOOGLE_APPLICATION_CREDENTIALS="$CREDENTIALS_PATH" \ - CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="$CREDENTIALS_PATH" - -# Fix base64 inconsistency -SHELL ["/bin/bash", "-c"] -RUN echo 'base64() { if [[ $@ == "--decode" ]]; then command base64 -d | more; else command base64 "$@"; fi; }' >> $APP_BASE_DIR/home/.bashrc - -ARG BUILD_CLOUD_SDK_VERSION -ENV CLOUD_SDK_VERSION="${BUILD_CLOUD_SDK_VERSION}" - -RUN cd cft && \ - curl -LO https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${CLOUD_SDK_VERSION}-linux-x86_64.tar.gz && \ - tar xzf google-cloud-sdk-${CLOUD_SDK_VERSION}-linux-x86_64.tar.gz && \ - rm google-cloud-sdk-${CLOUD_SDK_VERSION}-linux-x86_64.tar.gz && \ - ln -s /lib /lib64 && \ - gcloud config set 
core/disable_usage_reporting true && \ - gcloud config set component_manager/disable_update_check true && \ - gcloud config set metrics/environment github_docker_image && \ - gcloud --version - -ARG BUILD_TERRAFORM_VERSION -ENV TERRAFORM_VERSION="${BUILD_TERRAFORM_VERSION}" - -RUN curl -LO https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \ - unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \ - rm terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \ - mv terraform $APP_BASE_DIR/bin && \ - terraform --version - -ARG BUILD_PROVIDER_GSUITE_VERSION -ENV PROVIDER_GSUITE_VERSION="${BUILD_PROVIDER_GSUITE_VERSION}" - -RUN curl -LO https://github.com/DeviaVir/terraform-provider-gsuite/releases/download/v${PROVIDER_GSUITE_VERSION}/terraform-provider-gsuite_${PROVIDER_GSUITE_VERSION}_linux_amd64.tgz && \ - tar xzf terraform-provider-gsuite_${PROVIDER_GSUITE_VERSION}_linux_amd64.tgz && \ - rm terraform-provider-gsuite_${PROVIDER_GSUITE_VERSION}_linux_amd64.tgz && \ - mv terraform-provider-gsuite_v${PROVIDER_GSUITE_VERSION} $APP_BASE_DIR/home/.terraform.d/plugins/ - -WORKDIR $APP_BASE_DIR/workdir diff --git a/infra/concourse/build/Dockerfile.unit b/infra/concourse/build/Dockerfile.unit deleted file mode 100644 index 64221c52570..00000000000 --- a/infra/concourse/build/Dockerfile.unit +++ /dev/null @@ -1,11 +0,0 @@ -FROM alpine:3.8 - -RUN apk add --no-cache --update \ - bash \ - make \ - python=2.7.15-r1 \ - py-pip=10.0.1-r0 - -ADD ./build/data/requirements.txt . - -RUN pip install -r requirements.txt diff --git a/infra/concourse/build/data/Gemfile b/infra/concourse/build/data/Gemfile deleted file mode 100644 index 5cf8233a74b..00000000000 --- a/infra/concourse/build/data/Gemfile +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -source 'https://rubygems.org/' do - gem "kitchen-terraform", "~> 4.9" - gem "kubeclient", "~> 4.0" - gem "rest-client", "~> 2.0" -end diff --git a/infra/concourse/build/data/Gemfile.lock b/infra/concourse/build/data/Gemfile.lock deleted file mode 100644 index 0d858217da2..00000000000 --- a/infra/concourse/build/data/Gemfile.lock +++ /dev/null @@ -1,320 +0,0 @@ -GEM - remote: https://rubygems.org/ - specs: - addressable (2.6.0) - public_suffix (>= 2.0.2, < 4.0) - aws-eventstream (1.0.3) - aws-sdk (2.11.283) - aws-sdk-resources (= 2.11.283) - aws-sdk-core (2.11.283) - aws-sigv4 (~> 1.0) - jmespath (~> 1.0) - aws-sdk-resources (2.11.283) - aws-sdk-core (= 2.11.283) - aws-sigv4 (1.1.0) - aws-eventstream (~> 1.0, >= 1.0.2) - azure_graph_rbac (0.17.1) - ms_rest_azure (~> 0.11.0) - azure_mgmt_key_vault (0.17.4) - ms_rest_azure (~> 0.11.0) - azure_mgmt_resources (0.17.5) - ms_rest_azure (~> 0.11.1) - builder (3.2.3) - coderay (1.1.2) - concurrent-ruby (1.1.5) - declarative (0.0.10) - declarative-option (0.1.0) - diff-lcs (1.3) - docker-api (1.34.2) - excon (>= 0.47.0) - multi_json - domain_name (0.5.20180417) - unf (>= 0.0.5, < 1.0.0) - dry-configurable (0.8.3) - concurrent-ruby (~> 1.0) - dry-core (~> 0.4, >= 0.4.7) - dry-container (0.7.0) - concurrent-ruby (~> 1.0) - dry-configurable (~> 0.1, >= 0.1.3) - dry-core (0.4.7) - concurrent-ruby (~> 1.0) - dry-equalizer (0.2.2) - dry-inflector (0.1.2) - dry-logic (0.6.1) - concurrent-ruby (~> 1.0) - dry-core (~> 0.2) - dry-equalizer (~> 0.2) - dry-types (0.14.1) - concurrent-ruby (~> 1.0) - dry-container (~> 0.3) - 
dry-core (~> 0.4, >= 0.4.4) - dry-equalizer (~> 0.2) - dry-inflector (~> 0.1, >= 0.1.2) - dry-logic (~> 0.5, >= 0.5) - dry-validation (0.13.0) - concurrent-ruby (~> 1.0) - dry-configurable (~> 0.1, >= 0.1.3) - dry-core (~> 0.2, >= 0.2.1) - dry-equalizer (~> 0.2) - dry-logic (~> 0.5, >= 0.5.0) - dry-types (~> 0.14, >= 0.14) - equatable (0.5.0) - erubis (2.7.0) - excon (0.71.0) - faraday (0.15.4) - multipart-post (>= 1.2, < 3) - faraday-cookie_jar (0.0.6) - faraday (>= 0.7.4) - http-cookie (~> 1.0.0) - faraday_middleware (0.12.2) - faraday (>= 0.7.4, < 1.0) - ffi (1.11.1) - google-api-client (0.23.9) - addressable (~> 2.5, >= 2.5.1) - googleauth (>= 0.5, < 0.7.0) - httpclient (>= 2.8.1, < 3.0) - mime-types (~> 3.0) - representable (~> 3.0) - retriable (>= 2.0, < 4.0) - signet (~> 0.9) - googleauth (0.6.7) - faraday (~> 0.12) - jwt (>= 1.4, < 3.0) - memoist (~> 0.16) - multi_json (~> 1.11) - os (>= 0.9, < 2.0) - signet (~> 0.7) - gssapi (1.3.0) - ffi (>= 1.0.1) - gyoku (1.3.1) - builder (>= 2.1.2) - hashie (3.6.0) - htmlentities (4.3.4) - http (4.1.1) - addressable (~> 2.3) - http-cookie (~> 1.0) - http-form_data (~> 2.0) - http_parser.rb (~> 0.6.0) - http-accept (1.7.0) - http-cookie (1.0.3) - domain_name (~> 0.5) - http-form_data (2.1.1) - http_parser.rb (0.6.0) - httpclient (2.8.3) - inifile (3.0.0) - inspec (3.9.3) - addressable (~> 2.4) - faraday (>= 0.9.0) - faraday_middleware (~> 0.12.2) - hashie (~> 3.4) - htmlentities - json (>= 1.8, < 3.0) - method_source (~> 0.8) - mixlib-log - multipart-post - parallel (~> 1.9) - parslet (~> 1.5) - pry (~> 0) - rspec (~> 3) - rspec-its (~> 1.2) - rubyzip (~> 1.2, >= 1.2.2) - semverse - sslshake (~> 1.2) - term-ansicolor - thor (~> 0.20) - tomlrb (~> 1.2) - train (~> 1.5, >= 1.7.2) - train-habitat (~> 0.1) - tty-prompt (~> 0.17) - tty-table (~> 0.10) - jmespath (1.4.0) - json (2.1.0) - jwt (2.2.1) - kitchen-terraform (4.9.0) - dry-types (~> 0.14.0) - dry-validation (= 0.13.0) - inspec (~> 3.0) - json (~> 2.1.0) - 
mixlib-shellout (~> 2.2) - test-kitchen (~> 1.23) - tty-which (~> 0.4.0) - kubeclient (4.5.0) - http (>= 3.0, < 5.0) - recursive-open-struct (~> 1.0, >= 1.0.4) - rest-client (~> 2.0) - little-plugger (1.1.4) - logging (2.2.2) - little-plugger (~> 1.1) - multi_json (~> 1.10) - memoist (0.16.0) - method_source (0.9.2) - mime-types (3.2.2) - mime-types-data (~> 3.2015) - mime-types-data (3.2019.0331) - mixlib-install (3.11.18) - mixlib-shellout - mixlib-versioning - thor - mixlib-log (3.0.1) - mixlib-shellout (2.4.4) - mixlib-versioning (1.2.7) - ms_rest (0.7.4) - concurrent-ruby (~> 1.0) - faraday (~> 0.9) - timeliness (~> 0.3.10) - ms_rest_azure (0.11.1) - concurrent-ruby (~> 1.0) - faraday (~> 0.9) - faraday-cookie_jar (~> 0.0.6) - ms_rest (~> 0.7.4) - unf_ext (= 0.0.7.2) - multi_json (1.13.1) - multipart-post (2.1.1) - necromancer (0.4.0) - net-scp (1.2.1) - net-ssh (>= 2.6.5) - net-ssh (4.2.0) - net-ssh-gateway (1.3.0) - net-ssh (>= 2.6.5) - netrc (0.11.0) - nori (2.6.0) - os (1.0.1) - parallel (1.17.0) - parslet (1.8.2) - pastel (0.7.2) - equatable (~> 0.5.0) - tty-color (~> 0.4.0) - pry (0.12.2) - coderay (~> 1.1.0) - method_source (~> 0.9.0) - public_suffix (3.1.0) - recursive-open-struct (1.1.0) - representable (3.0.4) - declarative (< 0.1.0) - declarative-option (< 0.2.0) - uber (< 0.2.0) - rest-client (2.1.0) - http-accept (>= 1.7.0, < 2.0) - http-cookie (>= 1.0.2, < 2.0) - mime-types (>= 1.16, < 4.0) - netrc (~> 0.8) - retriable (3.1.2) - rspec (3.8.0) - rspec-core (~> 3.8.0) - rspec-expectations (~> 3.8.0) - rspec-mocks (~> 3.8.0) - rspec-core (3.8.0) - rspec-support (~> 3.8.0) - rspec-expectations (3.8.3) - diff-lcs (>= 1.2.0, < 2.0) - rspec-support (~> 3.8.0) - rspec-its (1.3.0) - rspec-core (>= 3.0.0) - rspec-expectations (>= 3.0.0) - rspec-mocks (3.8.0) - diff-lcs (>= 1.2.0, < 2.0) - rspec-support (~> 3.8.0) - rspec-support (3.8.0) - rubyntlm (0.6.2) - rubyzip (1.3.0) - semverse (3.0.0) - signet (0.11.0) - addressable (~> 2.3) - faraday (~> 0.9) - jwt 
(>= 1.5, < 3.0) - multi_json (~> 1.10) - sslshake (1.3.0) - strings (0.1.5) - strings-ansi (~> 0.1) - unicode-display_width (~> 1.5) - unicode_utils (~> 1.4) - strings-ansi (0.1.0) - term-ansicolor (1.7.1) - tins (~> 1.0) - test-kitchen (1.24.0) - mixlib-install (~> 3.6) - mixlib-shellout (>= 1.2, < 3.0) - net-scp (~> 1.1) - net-ssh (>= 2.9, < 5.0) - net-ssh-gateway (~> 1.2) - thor (~> 0.19) - winrm (~> 2.0) - winrm-elevated (~> 1.0) - winrm-fs (~> 1.1) - thor (0.20.3) - timeliness (0.3.10) - timers (4.3.0) - tins (1.20.2) - tomlrb (1.2.8) - train (1.7.6) - aws-sdk (~> 2) - azure_graph_rbac (~> 0.16) - azure_mgmt_key_vault (~> 0.17) - azure_mgmt_resources (~> 0.15) - docker-api (~> 1.26) - google-api-client (~> 0.23.9) - googleauth (~> 0.6.6) - inifile - json (>= 1.8, < 3.0) - mixlib-shellout (>= 2.0) - net-scp (~> 1.2) - net-ssh (>= 2.9, < 6.0) - winrm (~> 2.0) - winrm-fs (~> 1.0) - train-habitat (0.1.1) - train (>= 1.7.5, < 3.0) - tty-color (0.4.3) - tty-cursor (0.6.1) - tty-prompt (0.18.1) - necromancer (~> 0.4.0) - pastel (~> 0.7.0) - timers (~> 4.0) - tty-cursor (~> 0.6.0) - tty-reader (~> 0.5.0) - tty-reader (0.5.0) - tty-cursor (~> 0.6.0) - tty-screen (~> 0.6.4) - wisper (~> 2.0.0) - tty-screen (0.6.5) - tty-table (0.10.0) - equatable (~> 0.5.0) - necromancer (~> 0.4.0) - pastel (~> 0.7.2) - strings (~> 0.1.0) - tty-screen (~> 0.6.4) - tty-which (0.4.0) - uber (0.1.0) - unf (0.1.4) - unf_ext - unf_ext (0.0.7.2) - unicode-display_width (1.6.0) - unicode_utils (1.4.0) - winrm (2.3.2) - builder (>= 2.1.2) - erubis (~> 2.7) - gssapi (~> 1.2) - gyoku (~> 1.0) - httpclient (~> 2.2, >= 2.2.0.2) - logging (>= 1.6.1, < 3.0) - nori (~> 2.0) - rubyntlm (~> 0.6.0, >= 0.6.1) - winrm-elevated (1.1.1) - winrm (~> 2.0) - winrm-fs (~> 1.0) - winrm-fs (1.3.2) - erubis (~> 2.7) - logging (>= 1.6.1, < 3.0) - rubyzip (~> 1.1) - winrm (~> 2.0) - wisper (2.0.0) - -PLATFORMS - ruby - -DEPENDENCIES - kitchen-terraform (~> 4.9)! - kubeclient (~> 4.0)! - rest-client (~> 2.0)! 
- -BUNDLED WITH - 1.17.2 diff --git a/infra/concourse/build/data/requirements.txt b/infra/concourse/build/data/requirements.txt deleted file mode 100644 index b11234d92ba..00000000000 --- a/infra/concourse/build/data/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -google-api-python-client~=1.7 -google-auth~=1.6 diff --git a/infra/concourse/build/scripts/gcloud_auth.sh b/infra/concourse/build/scripts/gcloud_auth.sh deleted file mode 100644 index 63748591a26..00000000000 --- a/infra/concourse/build/scripts/gcloud_auth.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -e - -gcloud auth activate-service-account --key-file=$GOOGLE_APPLICATION_CREDENTIALS - -exec "$@" \ No newline at end of file diff --git a/infra/concourse/pipelines/README.md b/infra/concourse/pipelines/README.md deleted file mode 100644 index 0cb6d3100e6..00000000000 --- a/infra/concourse/pipelines/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Apply these pipelines - -See full document at [go/cft-module-ci][cft-module-ci] - -Take care to log in using: - - fly login --target cft -n cft -c https://concourse.infra.cft.tips - -Validate the pipeline: - - fly -t cft validate-pipeline -c pipelines/.yml - -Enforce the pipeline: - - make startup-scripts - -[cft-module-ci]: http://goto.google.com/cft-module-ci diff --git a/infra/concourse/pipelines/terraform-google-address.yml b/infra/concourse/pipelines/terraform-google-address.yml deleted file mode 100644 index 7f1fb2fe0e9..00000000000 --- a/infra/concourse/pipelines/terraform-google-address.yml +++ /dev/null @@ -1,131 +0,0 @@ -resource_types: - -- name: pull-request - type: docker-image - source: - repository: teliaoss/github-pr-resource - -resources: - -- name: pull-request - type: pull-request - webhook_token: ((address.github_webhook_token)) - source: - repository: terraform-google-modules/terraform-google-address - access_token: ((github.pr-access-token)) - -- name: lint-test-image - type: docker-image - source: - repository: 
gcr.io/cloud-foundation-cicd/cft/lint - tag: 2.4.0 - username: _json_key - password: ((sa.google)) - -- name: integration-test-image - type: docker-image - source: - repository: gcr.io/cloud-foundation-cicd/cft/kitchen-terraform - tag: 2.3.0 - username: _json_key - password: ((sa.google)) - -jobs: - -- name: lint-tests - public: true - plan: - - get: pull-request - trigger: true - version: every - - put: notify-lint-test-pending - resource: pull-request - params: - path: pull-request - context: lint-tests - status: pending - - get: lint-test-image - - task: run - image: lint-test-image - config: - platform: linux - inputs: - - name: pull-request - path: terraform-google-address - - name: lint-test-image - run: - path: make - args: ['-s'] - dir: terraform-google-address - on_success: - put: notify-lint-test-success - resource: pull-request - params: - path: pull-request - context: lint-tests - status: success - on_failure: - put: notify-lint-test-failure - resource: pull-request - params: - path: pull-request - context: lint-tests - status: failure - on_abort: - put: notify-lint-test-error - resource: pull-request - params: - path: pull-request - context: lint-tests - status: error - -- name: integration-tests - public: true - plan: - - get: pull-request - trigger: true - version: every - - put: notify-integration-test-pending - resource: pull-request - params: - path: pull-request - context: integration-tests - status: pending - - get: integration-test-image - trigger: true - - task: run-tests - image: integration-test-image - config: - platform: linux - inputs: - - name: pull-request - path: terraform-google-address - run: - path: make - args: ['test_integration'] - dir: terraform-google-address - params: - PROJECT_ID: ((address.phoogle_project_id)) - REGION: "us-east4" - SERVICE_ACCOUNT_JSON: ((address.phoogle_sa)) - on_success: - put: notify-integration-test-success - resource: pull-request - params: - path: pull-request - context: integration-tests - status: 
success - on_failure: - put: notify-integration-test-failure - resource: pull-request - params: - path: pull-request - context: integration-tests - status: failure - on_abort: - put: notify-integration-test-error - resource: pull-request - params: - path: pull-request - context: integration-tests - status: error diff --git a/infra/concourse/pipelines/terraform-google-jenkins.yml b/infra/concourse/pipelines/terraform-google-jenkins.yml deleted file mode 100644 index 0650912646d..00000000000 --- a/infra/concourse/pipelines/terraform-google-jenkins.yml +++ /dev/null @@ -1,134 +0,0 @@ -resource_types: - - - name: pull-request - type: docker-image - source: - repository: teliaoss/github-pr-resource - -resources: - - name: pull-request - type: pull-request - webhook_token: ((jenkins.github_webhook_token)) - source: - repository: terraform-google-modules/terraform-google-jenkins - access_token: ((github.pr-access-token)) - - - name: lint-test-image - type: docker-image - source: - repository: gcr.io/cloud-foundation-cicd/cft/lint - tag: 2.4.0 - username: _json_key - password: ((sa.google)) - - - name: integration-test-image - type: docker-image - source: - repository: gcr.io/cloud-foundation-cicd/cft/kitchen-terraform - tag: 2.3.0 - username: _json_key - password: ((sa.google)) - -jobs: - - - name: lint-tests - public: true - plan: - - get: pull-request - trigger: true - version: every - - put: notify-lint-test-pending - resource: pull-request - params: - path: pull-request - context: lint-tests - status: pending - - get: lint-test-image - trigger: true - - task: run - image: lint-test-image - config: - platform: linux - inputs: - - name: pull-request - path: terraform-google-jenkins - - name: lint-test-image - run: - path: make - args: - - '-s' - - check - dir: terraform-google-jenkins - on_success: - put: notify-lint-test-success - resource: pull-request - params: - path: pull-request - context: lint-tests - status: success - on_failure: - put: notify-lint-test-failure 
- resource: pull-request - params: - path: pull-request - context: lint-tests - status: failure - on_abort: - put: notify-lint-test-error - resource: pull-request - params: - path: pull-request - context: lint-tests - status: error - - - name: integration-tests - public: true - plan: - - get: pull-request - trigger: true - version: every - - put: notify-integration-test-pending - resource: pull-request - params: - path: pull-request - context: integration-tests - status: pending - - get: integration-test-image - trigger: true - - task: run-tests - image: integration-test-image - config: - platform: linux - inputs: - - name: pull-request - path: terraform-google-jenkins - run: - path: make - args: - - '-s' - - test_integration - dir: terraform-google-jenkins - params: - PROJECT_ID: ((jenkins.phoogle_project_id)) - SERVICE_ACCOUNT_JSON: ((jenkins.phoogle_sa)) - on_success: - put: notify-integration-test-success - resource: pull-request - params: - path: pull-request - context: integration-tests - status: success - on_failure: - put: notify-integration-test-failure - resource: pull-request - params: - path: pull-request - context: integration-tests - status: failure - on_abort: - put: notify-integration-test-error - resource: pull-request - params: - path: pull-request - context: integration-tests - status: error diff --git a/infra/concourse/pipelines/terraform-google-sql-db.yml b/infra/concourse/pipelines/terraform-google-sql-db.yml deleted file mode 100644 index fed4a34f34c..00000000000 --- a/infra/concourse/pipelines/terraform-google-sql-db.yml +++ /dev/null @@ -1,134 +0,0 @@ -resource_types: - - - name: pull-request - type: docker-image - source: - repository: teliaoss/github-pr-resource - -resources: - - name: pull-request - type: pull-request - webhook_token: ((sql_db.github_webhook_token)) - source: - repository: GoogleCloudPlatform/terraform-google-sql-db - access_token: ((github.pr-access-token)) - - - name: lint-test-image - type: docker-image - source: - 
repository: gcr.io/cloud-foundation-cicd/cft/lint - tag: 2.1.0 - username: _json_key - password: ((sa.google)) - - - name: integration-test-image - type: docker-image - source: - repository: gcr.io/cloud-foundation-cicd/cft/kitchen-terraform - tag: 2.0.0 - username: _json_key - password: ((sa.google)) - -jobs: - - - name: lint-tests - public: true - plan: - - get: pull-request - trigger: true - version: every - - put: notify-lint-test-pending - resource: pull-request - params: - path: pull-request - context: lint-tests - status: pending - - get: lint-test-image - trigger: true - - task: run - image: lint-test-image - config: - platform: linux - inputs: - - name: pull-request - path: terraform-google-sql-db - - name: lint-test-image - run: - path: make - args: - - '-s' - - check - dir: terraform-google-sql-db - on_success: - put: notify-lint-test-success - resource: pull-request - params: - path: pull-request - context: lint-tests - status: success - on_failure: - put: notify-lint-test-failure - resource: pull-request - params: - path: pull-request - context: lint-tests - status: failure - on_abort: - put: notify-lint-test-error - resource: pull-request - params: - path: pull-request - context: lint-tests - status: error - - - name: integration-tests - public: true - plan: - - get: pull-request - trigger: true - version: every - - put: notify-integration-test-pending - resource: pull-request - params: - path: pull-request - context: integration-tests - status: pending - - get: integration-test-image - trigger: true - - task: run-tests - image: integration-test-image - config: - platform: linux - inputs: - - name: pull-request - path: terraform-google-sql-db - run: - path: make - args: - - '-s' - - test_integration - dir: terraform-google-sql-db - params: - PROJECT_ID: ((sql_db.phoogle_project_id)) - SERVICE_ACCOUNT_JSON: ((sql_db.phoogle_sa)) - on_success: - put: notify-integration-test-success - resource: pull-request - params: - path: pull-request - context: 
integration-tests - status: success - on_failure: - put: notify-integration-test-failure - resource: pull-request - params: - path: pull-request - context: integration-tests - status: failure - on_abort: - put: notify-integration-test-error - resource: pull-request - params: - path: pull-request - context: integration-tests - status: error \ No newline at end of file diff --git a/infra/concourse/pipelines/terraform-google-vpn.yml b/infra/concourse/pipelines/terraform-google-vpn.yml deleted file mode 100644 index aabe22c3221..00000000000 --- a/infra/concourse/pipelines/terraform-google-vpn.yml +++ /dev/null @@ -1,128 +0,0 @@ -resource_types: - -- name: pull-request - type: docker-image - source: - repository: teliaoss/github-pr-resource - -resources: - -- name: pull-request - type: pull-request - webhook_token: ((vpn.github_webhook_token)) - source: - repository: terraform-google-modules/terraform-google-vpn - access_token: ((github.pr-access-token)) - -- name: lint-test-image - type: docker-image - source: - repository: gcr.io/cloud-foundation-cicd/cft/lint - tag: 2.4.0 - username: _json_key - password: ((sa.google)) - -- name: integration-test-image - type: docker-image - source: - repository: gcr.io/cloud-foundation-cicd/cft/kitchen-terraform - tag: 2.3.0 - username: _json_key - password: ((sa.google)) - -jobs: -- name: lint-tests - public: true - plan: - - get: pull-request - trigger: true - version: every - - put: notify-lint-test-pending - resource: pull-request - params: - path: pull-request - context: lint-tests - status: pending - - get: lint-test-image - - task: run - image: lint-test-image - config: - platform: linux - inputs: - - name: pull-request - path: terraform-google-vpn - - name: lint-test-image - run: - path: /bin/bash - args: ['-c', 'exec make check'] - dir: terraform-google-vpn - on_success: - put: notify-lint-test-success - resource: pull-request - params: - path: pull-request - context: lint-tests - status: success - on_failure: - put: 
notify-lint-test-failure - resource: pull-request - params: - path: pull-request - context: lint-tests - status: failure - on_abort: - put: notify-lint-tests-abort - resource: pull-request - params: - path: pull-request - context: lint-tests - status: error - -- name: integration-tests - public: true - plan: - - get: pull-request - trigger: true - version: every - - put: notify-integration-test-pending - resource: pull-request - params: - path: pull-request - context: integration-tests - status: pending - - get: integration-test-image - - task: run-tests - image: integration-test-image - config: - platform: linux - inputs: - - name: pull-request - path: terraform-google-vpn - run: - path: /bin/bash - args: ['-c', 'exec make test_integration'] - dir: terraform-google-vpn - params: - PROJECT_ID: ((vpn.phoogle_project_id)) - SERVICE_ACCOUNT_JSON: ((vpn.phoogle_sa)) - on_success: - put: notify-integration-tests-success - resource: pull-request - params: - path: pull-request - context: integration-tests - status: success - on_failure: - put: notify-integration-tests-failure - resource: pull-request - params: - path: pull-request - context: integration-tests - status: failure - on_abort: - put: notify-integration-tests-abort - resource: pull-request - params: - path: pull-request - context: integration-tests - status: error diff --git a/infra/concourse/vars/phoogle.yml b/infra/concourse/vars/phoogle.yml deleted file mode 100644 index 7d3f06540e3..00000000000 --- a/infra/concourse/vars/phoogle.yml +++ /dev/null @@ -1,7 +0,0 @@ -phoogle: - admin_account_email: admin@phoogle.net - billing_account_id: 01E8A0-35F760-5CF02A - domain: phoogle.net - folder_id: 853002531658 - group_name: pf-ci-test-fixture - org_id: 826592752744 diff --git a/infra/module-swapper/CHANGELOG.md b/infra/module-swapper/CHANGELOG.md new file mode 100644 index 00000000000..7c6b060b1b1 --- /dev/null +++ b/infra/module-swapper/CHANGELOG.md @@ -0,0 +1,156 @@ +# Changelog + +## 
[0.4.11](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.10...infra/module-swapper/v0.4.11) (2025-01-02) + + +### Bug Fixes + +* **deps:** update module github.com/go-git/go-git/v5 to v5.13.0 ([#2780](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2780)) ([31484dd](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/31484dd81a823b52465147d651fe4bfff46e9889)) +* **deps:** update module github.com/go-git/go-git/v5 to v5.13.1 ([#2783](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2783)) ([36ae5ad](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/36ae5adac7e3d57f21ba6ae7c59af58b7f7f2329)) + +## [0.4.10](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.9...infra/module-swapper/v0.4.10) (2024-12-11) + + +### Bug Fixes + +* **deps:** bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /infra/module-swapper ([#2749](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2749)) ([5544152](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/55441521069ad7e2d7f5c4541a64f4f0ded32630)) + +## [0.4.9](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.8...infra/module-swapper/v0.4.9) (2024-09-13) + + +### Bug Fixes + +* **deps:** update module github.com/hashicorp/hcl/v2 to v2.22.0 ([#2569](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2569)) ([cfc0fdd](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/cfc0fdd4489800c38c5896fc0847cc927a953c59)) + +## [0.4.8](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.7...infra/module-swapper/v0.4.8) (2024-07-22) + + +### Bug Fixes + +* **deps:** update module github.com/zclconf/go-cty to v1.15.0 ([#2466](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2466)) 
([65a1aa0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/65a1aa030f528e8fa6ce85ef60aedd5995dc5cae)) + +## [0.4.7](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.6...infra/module-swapper/v0.4.7) (2024-06-28) + + +### Bug Fixes + +* **deps:** update module github.com/hashicorp/hcl/v2 to v2.21.0 ([#2435](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2435)) ([83ae0c8](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/83ae0c8f323edd7985a1267529fbb17877803f66)) + +## [0.4.6](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.5...infra/module-swapper/v0.4.6) (2024-04-22) + + +### Bug Fixes + +* **deps:** bump golang.org/x/net from 0.22.0 to 0.23.0 in /infra/module-swapper ([#2276](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2276)) ([a41759b](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/a41759b823364b9602309cbdc797156eb22e7dc2)) + +## [0.4.5](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.4...infra/module-swapper/v0.4.5) (2024-04-02) + + +### Bug Fixes + +* **deps:** update module github.com/go-git/go-git/v5 to v5.12.0 ([#2244](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2244)) ([fcdca77](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/fcdca77dcfe63b6e928ce551863df69e2cedc6cd)) + +## [0.4.4](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.3...infra/module-swapper/v0.4.4) (2024-03-27) + + +### Bug Fixes + +* **deps:** update module github.com/hashicorp/hcl/v2 to v2.20.1 ([#2234](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2234)) ([d0abcaa](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/d0abcaac87792cbab865f78b2e98b0ce0849f309)) + +## 
[0.4.3](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.2...infra/module-swapper/v0.4.3) (2024-03-21) + + +### Bug Fixes + +* **deps:** update module github.com/zclconf/go-cty to v1.14.4 ([#2222](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2222)) ([4004d2a](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/4004d2a2ffaae6504279130cb31c42ce2d8dfa99)) + +## [0.4.2](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.1...infra/module-swapper/v0.4.2) (2024-03-07) + + +### Bug Fixes + +* **deps:** update module github.com/hashicorp/hcl/v2 to v2.20.0 ([#2197](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2197)) ([8cdbe34](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/8cdbe3444ee4fa25f25dcd779b2cfc63ee158dc5)) +* **deps:** update module github.com/zclconf/go-cty to v1.14.3 ([#2201](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2201)) ([e6a9f22](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e6a9f220b040fd63946eabaf8ec84385611ca8d2)) + +## [0.4.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.4.0...infra/module-swapper/v0.4.1) (2024-02-13) + + +### Bug Fixes + +* **deps:** update module github.com/zclconf/go-cty to v1.14.2 ([#2114](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2114)) ([b26ff4e](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/b26ff4e8316c724a57745cab4a52773c876e7cb5)) + +## [0.4.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.3.1...infra/module-swapper/v0.4.0) (2024-01-08) + + +### Features + +* module swapper hcl source targeting ([#2056](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2056)) 
([e691715](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e691715314f3f3d5a2cf10103f81f9ac7a80a3f1)) + + +### Bug Fixes + +* **deps:** bump github.com/cloudflare/circl from 1.3.3 to 1.3.7 in /infra/module-swapper ([#2077](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2077)) ([3a015c1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/3a015c166ed27c9a53ee743ae3d44543ae3b9f12)) + +## [0.3.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.3.0...infra/module-swapper/v0.3.1) (2023-12-19) + + +### Bug Fixes + +* **deps:** bump golang.org/x/crypto from 0.16.0 to 0.17.0 in /infra/module-swapper ([#2050](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2050)) ([3f6092f](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/3f6092f1a44b83f92c0b6b8c4529f20ac1478d7d)) +* **deps:** update module github.com/go-git/go-git/v5 to v5.11.0 ([#2042](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2042)) ([384f9de](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/384f9de317f3a47f590572730c521fe5228e9cc9)) + +## [0.3.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.2.5...infra/module-swapper/v0.3.0) (2023-12-13) + + +### Features + +* update to GO 1.21 ([#2037](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2037)) ([e76ff55](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e76ff55afb5ee9c8c57b7b8a802acdab1ca15130)) + +## [0.2.5](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.2.4...infra/module-swapper/v0.2.5) (2023-11-28) + + +### Bug Fixes + +* bump the all group in /infra/module-swapper with 1 update ([#1991](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1991)) 
([e761759](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/e76175977bc82a4ae9f15ab55166ad2dc8e52e31)) + +## [0.2.4](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.2.3...infra/module-swapper/v0.2.4) (2023-10-25) + + +### Bug Fixes + +* bump the all group in /infra/module-swapper with 1 update ([#1897](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1897)) ([05757d9](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/05757d901b2c53ab28904ad03557168c44215b81)) + +## [0.2.3](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.2.2...infra/module-swapper/v0.2.3) (2023-10-17) + + +### Bug Fixes + +* **deps:** bump golang.org/x/net from 0.15.0 to 0.17.0 in /infra/module-swapper ([#1877](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1877)) ([10ba511](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/10ba51138264e1f675d851f4f8b08e15168c74ad)) + +## [0.2.2](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.2.1...infra/module-swapper/v0.2.2) (2023-09-20) + + +### Bug Fixes + +* **deps:** update module github.com/go-git/go-git/v5 to v5.9.0 ([#1842](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1842)) ([786b642](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/786b642a15fdfb6efc970859f75c359fc2d74db5)) + +## [0.2.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.2.0...infra/module-swapper/v0.2.1) (2023-09-06) + + +### Bug Fixes + +* **deps:** update module github.com/go-git/go-git/v5 to v5.8.1 ([#1733](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1733)) ([668f901](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/668f90157ee5e3d1b07dc23caefeb1e2083bab3c)) + +## 
[0.2.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper/v0.1.0...infra/module-swapper/v0.2.0) (2023-08-16) + + +### Features + +* update module path and GO 1.20 lint ([#1758](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1758)) ([95206e4](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/95206e4a1f3e3e46312e7334839923194a0b5942)) + +## [0.1.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/infra/module-swapper-v0.0.1...infra/module-swapper/v0.1.0) (2023-08-16) + + +### Features + +* convert module-swapper to release-please ([#1735](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/1735)) ([2cc1e98](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/2cc1e987d4f7a8cb861f6b78d755529ae713103e)) diff --git a/infra/module-swapper/Readme.md b/infra/module-swapper/Readme.md new file mode 100644 index 00000000000..dc80dd75b09 --- /dev/null +++ b/infra/module-swapper/Readme.md @@ -0,0 +1,19 @@ +# Module Swapper + +Module Swapper is a utility used for swapping TF registry references with local modules. It will ignore registry references to all other modules except for the one in current directory. + +``` +Usage of module-swapper: + -examples-path string + Path to examples that should be swapped. Defaults to cwd/examples (default "examples") + -registry-prefix string + Module registry prefix (default "terraform-google-modules") + -registry-suffix string + Module registry suffix (default "google") + -restore + Restores disabled modules + -submods-path string + Path to a submodules if any that maybe referenced. Defaults to working dir/modules (default "modules") + -workdir string + Absolute path to root module where examples should be swapped. 
Defaults to working directory +``` diff --git a/infra/module-swapper/cmd/swap.go b/infra/module-swapper/cmd/swap.go new file mode 100644 index 00000000000..77deb9791dd --- /dev/null +++ b/infra/module-swapper/cmd/swap.go @@ -0,0 +1,431 @@ +package cmd + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "github.com/go-git/go-git/v5" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/pmezard/go-difflib/difflib" + "github.com/zclconf/go-cty/cty" + + giturl "github.com/chainguard-dev/git-urls" +) + +type LocalTerraformModule struct { + Name string + Dir string + ModuleFQN string +} + +const ( + moduleBlockType = "module" + sourceAttrib = "source" + terraformExtension = "*.tf" + restoreMarker = "[restore-marker]" + linebreak = "\n" +) + +var ( + localModules = []LocalTerraformModule{} +) + +// getRemoteURL gets the URL of a given remote from git repo at dir +func getRemoteURL(dir, remoteName string) (string, error) { + r, err := git.PlainOpen(dir) + if err != nil { + return "", err + } + rm, err := r.Remote(remoteName) + if err != nil { + return "", err + } + return rm.Config().URLs[0], nil +} + +// trimAnySuffixes trims first matching suffix from slice of suffixes +func trimAnySuffixes(s string, suffixes []string) string { + for _, suffix := range suffixes { + if strings.HasSuffix(s, suffix) { + s = s[:len(s)-len(suffix)] + return s + } + } + return s +} + +// getModuleNameRegistry returns module name and registry by parsing git remote +func getModuleNameRegistry(dir string) (string, string, error) { + remote, err := getRemoteURL(dir, "origin") + if err != nil { + return "", "", err + } + u, err := giturl.Parse(remote) + if err != nil { + return "", "", err + } + if u.Host != "github.com" { + return "", "", fmt.Errorf("expected GitHub remote, got: %s", remote) + } + orgRepo := u.Path + orgRepo = trimAnySuffixes(orgRepo, []string{"/", 
".git"}) + orgRepo = strings.TrimPrefix(orgRepo, "/") + + split := strings.Split(orgRepo, "/") + if len(split) != 2 { + return "", "", fmt.Errorf("expected GitHub remote of form https://github.com/ModuleRegistry/ModuleRepo, got: %s", remote) + } + org, repoName := split[0], split[1] + + // module repos are prefixed with terraform-google- + if !strings.HasPrefix(repoName, "terraform-google-") { + return "", "", fmt.Errorf("expected to find repo name prefixed with terraform-google-. Got: %s", repoName) + } + moduleName := strings.ReplaceAll(repoName, "terraform-google-", "") + log.Printf("Module name set from remote to %s", moduleName) + return moduleName, org, nil +} + +// findSubModules generates slice of LocalTerraformModule for submodules +func findSubModules(path, rootModuleFQN string) []LocalTerraformModule { + var subModules = make([]LocalTerraformModule, 0) + // if no modules dir, return empty slice + if _, err := os.Stat(path); err != nil { + log.Print("No submodules found") + return subModules + } + files, err := os.ReadDir(path) + if err != nil { + log.Fatalf("Error finding submodules: %v", err) + } + absPath, err := filepath.Abs(path) + if err != nil { + log.Fatalf("Error finding submodule absolute path: %v", err) + } + for _, f := range files { + if f.IsDir() { + subModules = append(subModules, LocalTerraformModule{f.Name(), filepath.Join(absPath, f.Name()), fmt.Sprintf("%s//modules/%s", rootModuleFQN, f.Name())}) + } + } + return subModules +} + +// restoreModules restores old config as marked by restoreMarker +func restoreModules(f []byte, p string) ([]byte, error) { + if _, err := os.Stat(p); err != nil { + return nil, err + } + strFile := string(f) + if !strings.Contains(strFile, restoreMarker) { + return f, nil + } + lines := strings.Split(strFile, linebreak) + for i, line := range lines { + if strings.Contains(line, restoreMarker) { + lines[i] = strings.Split(line, restoreMarker)[1] + } + } + return []byte(strings.Join(lines, linebreak)), nil +} + 
+// matchedModule returns matching local TF module based on local path. +func matchedModule(localPath string) *LocalTerraformModule { + for _, l := range localModules { + if localPath == l.Dir { + return &l + } + } + return nil +} + +// localToRemote converts all local references in f to remote references. +func localToRemote(f []byte, p string) ([]byte, error) { + if _, err := os.Stat(p); err != nil { + return nil, err + } + absPath, err := filepath.Abs(filepath.Dir(p)) + if err != nil { + return nil, fmt.Errorf("failed to get absolute path: %v", err) + } + f, err = restoreModules(f, p) + if err != nil { + return nil, err + } + + currentReferences, err := moduleSourceRefs(f, p) + if err != nil { + return nil, fmt.Errorf("failed to write find module sources: %v", err) + } + newReferences := map[string]string{} + for label, source := range currentReferences { + localModule := matchedModule(filepath.Clean(filepath.Join(absPath, source))) + if localModule == nil { + log.Printf("no matches for %s", source) + continue + } + newReferences[label] = localModule.ModuleFQN + } + if len(currentReferences) == 0 { + return f, nil + } + updated, err := writeModuleRefs(f, p, newReferences) + if err != nil { + return nil, fmt.Errorf("failed to write updated module sources: %v", err) + } + // print diff info + log.Printf("Modifications made to file %s", p) + diff := difflib.UnifiedDiff{ + A: difflib.SplitLines(string(f)), + B: difflib.SplitLines(string(updated)), + FromFile: "Original", + ToFile: "Modified", + Context: 3, + } + diffInfo, _ := difflib.GetUnifiedDiffString(diff) + log.Println(diffInfo) + return updated, nil +} + +// remoteToLocal converts all remote references in f to local references. 
+func remoteToLocal(f []byte, p string) ([]byte, error) { + if _, err := os.Stat(p); err != nil { + return nil, err + } + f = commentVersions(f) + absPath, err := filepath.Abs(filepath.Dir(p)) + if err != nil { + return nil, fmt.Errorf("failed to get absolute path: %v", err) + } + fqnMap := make(map[string]LocalTerraformModule, len(localModules)) + for _, l := range localModules { + fqnMap[l.ModuleFQN] = l + } + currentReferences, err := moduleSourceRefs(f, p) + if err != nil { + return nil, fmt.Errorf("failed to write find module sources: %v", err) + } + newReferences := map[string]string{} + for label, source := range currentReferences { + localModule, exists := fqnMap[source] + if !exists { + continue + } + newModulePath, err := filepath.Rel(absPath, localModule.Dir) + if err != nil { + return nil, fmt.Errorf("failed to find relative path: %v", err) + } + newReferences[label] = newModulePath + } + if len(currentReferences) == 0 { + return f, nil + } + updated, err := writeModuleRefs(f, p, newReferences) + if err != nil { + return nil, fmt.Errorf("failed to write updated module sources: %v", err) + } + // print diff info + log.Printf("Modifications made to file %s", p) + diff := difflib.UnifiedDiff{ + A: difflib.SplitLines(string(f)), + B: difflib.SplitLines(string(updated)), + FromFile: "Original", + ToFile: "Modified", + Context: 3, + } + diffInfo, _ := difflib.GetUnifiedDiffString(diff) + log.Println(diffInfo) + return updated, nil +} + +// commentVersions comments version attributes for local modules. 
+func commentVersions(f []byte) []byte { + strFile := string(f) + lines := strings.Split(strFile, linebreak) + for _, localModule := range localModules { + // check if current file has module/submodules references that should be swapped + if !strings.Contains(strFile, localModule.ModuleFQN) { + continue + } + for i, line := range lines { + if !strings.Contains(line, localModule.ModuleFQN) { + continue + } + if i < len(lines)-1 && strings.Contains(lines[i+1], "version") && !strings.Contains(lines[i+1], restoreMarker) { + leadingWhiteSpace := lines[i+1][:strings.Index(lines[i+1], "version")] + lines[i+1] = fmt.Sprintf("%s# %s %s", leadingWhiteSpace, restoreMarker, lines[i+1]) + } + } + } + newExample := strings.Join(lines, linebreak) + return []byte(newExample) +} + +// getTFFiles returns a slice of valid TF file paths +func getTFFiles(path string) []string { + // validate path + if _, err := os.Stat(path); err != nil { + log.Fatal(fmt.Errorf("Unable to find %s : %v", path, err)) + } + var files = make([]string, 0) + err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + if err != nil && info.IsDir() { + return nil + } + isTFFile, _ := filepath.Match(terraformExtension, filepath.Base(path)) + if isTFFile { + files = append(files, path) + } + return nil + }) + if err != nil { + log.Printf("Error walking files: %v", err) + } + return files + +} + +var ( + // Partial schema of examples. + exampleSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: moduleBlockType, + LabelNames: []string{"name"}, + }, + }, + } + // Partial schema of each module. + moduleSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: sourceAttrib, + }, + }, + } +) + +// moduleSourceRefs returns a map of module label to corresponding source references. 
+func moduleSourceRefs(f []byte, TFFilePath string) (map[string]string, error) { + refs := map[string]string{} + p, err := hclparse.NewParser().ParseHCL(f, TFFilePath) + if err != nil { + return nil, fmt.Errorf("failed to parse hcl: %v", err) + } + c, _, diags := p.Body.PartialContent(exampleSchema) + if diags.HasErrors() { + return nil, fmt.Errorf("failed to parse example content: %v", diags.Error()) + } + + for _, b := range c.Blocks { + if b.Type != moduleBlockType { + continue + } + if len(b.Labels) != 1 { + log.Printf("got multiple labels %v, module should only have one", b.Labels) + continue + } + + content, _, diags := b.Body.PartialContent(moduleSchema) + if diags.HasErrors() { + log.Printf("skipping %s module, failed to parse module content: %v", b.Labels[0], diags.Error()) + continue + } + + sourcrAttr, exists := content.Attributes[sourceAttrib] + if !exists { + log.Printf("skipping %s module, no source attribute", b.Labels[0]) + continue + } + var sourceName string + diags = gohcl.DecodeExpression(sourcrAttr.Expr, nil, &sourceName) + if diags.HasErrors() { + log.Printf("skipping %s module, failed to decode source value: %v", b.Labels[0], diags.Error()) + continue + } + refs[b.Labels[0]] = sourceName + } + return refs, nil +} + +// writeModuleRefs appends or overwrites provided moduleRefs to file f. 
+func writeModuleRefs(f []byte, p string, moduleRefs map[string]string) ([]byte, error) { + wf, diags := hclwrite.ParseConfig(f, p, hcl.Pos{}) + if diags.HasErrors() { + return nil, fmt.Errorf("failed to parse hcl: %v", diags.Error()) + } + for _, b := range wf.Body().Blocks() { + if b.Type() != moduleBlockType { + continue + } + if len(b.Labels()) != 1 { + log.Printf("got multiple labels %v, module should only have one", b.Labels()) + continue + } + newSource, exists := moduleRefs[b.Labels()[0]] + if !exists { + continue + } + b.Body().SetAttributeValue(sourceAttrib, cty.StringVal(newSource)) + } + + var testS strings.Builder + _, err := wf.WriteTo(&testS) + if err != nil { + return nil, fmt.Errorf("failed to write hcl: %v", diags.Error()) + } + return []byte(testS.String()), nil +} + +func SwapModules(rootPath, moduleRegistrySuffix, moduleRegistryPrefix, subModulesDir, examplesDir string, restore bool) { + rootPath = filepath.Clean(rootPath) + moduleName, foundRegistryPrefix, err := getModuleNameRegistry(rootPath) + if err != nil && moduleRegistryPrefix == "" { + log.Printf("failed to get module name and registry: %v", err) + return + } + + if moduleRegistryPrefix != "" { + foundRegistryPrefix = moduleRegistryPrefix + } + + // add root module to slice of localModules + localModules = append(localModules, LocalTerraformModule{moduleName, rootPath, fmt.Sprintf("%s/%s/%s", foundRegistryPrefix, moduleName, moduleRegistrySuffix)}) + examplesPath := fmt.Sprintf("%s/%s", rootPath, examplesDir) + subModulesPath := fmt.Sprintf("%s/%s", rootPath, subModulesDir) + + // add submodules, if any to localModules + submods := findSubModules(subModulesPath, localModules[0].ModuleFQN) + localModules = append(localModules, submods...) 
+ + // find all TF files in examples dir to process + exampleTFFiles := getTFFiles(examplesPath) + for _, TFFilePath := range exampleTFFiles { + file, err := os.ReadFile(TFFilePath) + if err != nil { + log.Printf("Error reading file: %v", err) + } + + var newFile []byte + if restore { + newFile, err = localToRemote(file, TFFilePath) + } else { + newFile, err = remoteToLocal(file, TFFilePath) + } + if err != nil { + log.Printf("Error processing file: %v", err) + } + + if newFile != nil { + err = os.WriteFile(TFFilePath, newFile, 0644) + if err != nil { + log.Printf("Error writing file: %v", err) + } + } + } +} diff --git a/infra/module-swapper/cmd/swap_test.go b/infra/module-swapper/cmd/swap_test.go new file mode 100644 index 00000000000..e4f25243006 --- /dev/null +++ b/infra/module-swapper/cmd/swap_test.go @@ -0,0 +1,234 @@ +package cmd + +import ( + "bytes" + "log" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/config" + "github.com/google/go-cmp/cmp" +) + +var ( + moduleRegistryPrefix = "terraform-google-modules" + moduleRegistrySuffix = "google" +) + +func getAbsPathHelper(p string) string { + a, err := filepath.Abs(p) + if err != nil { + log.Fatalf("Unable to find absolute path %s: %v", p, err) + } + return a +} + +func getFileHelper(t *testing.T, p string) []byte { + f, err := os.ReadFile(p) + if err != nil { + t.Fatalf("Error reading file: %v", err) + } + return f +} + +func setupProcessFileTest(modules []LocalTerraformModule) { + localModules = modules +} + +func tearDownProcessFileTest() { + localModules = []LocalTerraformModule{} +} + +func Test_getTFFiles(t *testing.T) { + type args struct { + path string + } + tests := []struct { + name string + args args + want []string + }{ + {"simple", args{"testdata/example-module-simple"}, []string{"testdata/example-module-simple/examples/example-one/main.tf", "testdata/example-module-simple/examples/main.tf"}}, + 
{"simple-single-submodule", args{"testdata/example-module-with-submodules/modules/bar-module"}, []string{"testdata/example-module-with-submodules/modules/bar-module/main.tf"}}, + {"simple-single-submodule-empty", args{"testdata/example-module-with-submodules/docs"}, []string{}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getTFFiles(tt.args.path); !reflect.DeepEqual(got, tt.want) { + t.Errorf("getTFFiles() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_findSubModules(t *testing.T) { + type args struct { + path string + rootModuleFQN string + } + tests := []struct { + name string + args args + want []LocalTerraformModule + }{ + {"simple-no-submodules", args{"testdata/example-module-simple/modules", "terraform-google-modules/example-module-simple/google"}, []LocalTerraformModule{}}, + {"simple-with-submodules", args{"testdata/example-module-with-submodules/modules", "terraform-google-modules/example-module-with-submodules/google"}, + []LocalTerraformModule{ + {"bar-module", filepath.Join(getAbsPathHelper("testdata/example-module-with-submodules/modules"), "bar-module"), "terraform-google-modules/example-module-with-submodules/google//modules/bar-module"}, + {"foo-module", filepath.Join(getAbsPathHelper("testdata/example-module-with-submodules/modules"), "foo-module"), "terraform-google-modules/example-module-with-submodules/google//modules/foo-module"}, + }}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := findSubModules(tt.args.path, tt.args.rootModuleFQN); !reflect.DeepEqual(got, tt.want) { + t.Errorf("findSubModules() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_processFile(t *testing.T) { + tests := []struct { + name string + modules []LocalTerraformModule + exampleRemotePath string + exampleLocalPath string + }{ + { + name: "simple", + modules: testModules("example-module-simple"), + exampleRemotePath: "example-module-simple/examples/example-one/main.tf", + 
exampleLocalPath: "example-module-simple/examples/example-one/main.tf.local", + }, + { + name: "simple-submodules-single-submod", + modules: testModules("example-module-with-submodules"), + exampleRemotePath: "example-module-with-submodules/examples/example-one/main.tf", + exampleLocalPath: "example-module-with-submodules/examples/example-one/main.tf.local", + }, + { + name: "simple-submodules-multiple-modules", + modules: testModules("example-module-with-submodules"), + exampleRemotePath: "example-module-with-submodules/examples/example-two/main.tf", + exampleLocalPath: "example-module-with-submodules/examples/example-two/main.tf.local", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + setupProcessFileTest(tt.modules) + var buf bytes.Buffer + log.SetOutput(&buf) + defer func() { + log.SetOutput(os.Stderr) + }() + tt.exampleRemotePath = path.Join(testDataDir, tt.exampleRemotePath) + tt.exampleLocalPath = path.Join(testDataDir, tt.exampleLocalPath) + remoteExample := getFileHelper(t, tt.exampleRemotePath) + localExample := getFileHelper(t, tt.exampleLocalPath) + + // Swap remote references to local. + got, err := remoteToLocal(remoteExample, tt.exampleRemotePath) + if err != nil { + t.Fatalf("remoteToLocal() error = %v", err) + return + } + if diff := cmp.Diff(localExample, got); diff != "" { + t.Errorf("remoteToLocal() returned unexpected difference (-want +got):\n%s", diff) + } + + // Swap local references to remote. 
+ got, err = localToRemote(localExample, tt.exampleLocalPath) + t.Log(buf.String()) + if err != nil { + t.Errorf("localToRemote() error = %v", err) + return + } + if diff := cmp.Diff(remoteExample, got); diff != "" { + t.Errorf("localToRemote() returned unexpected difference (-want +got):\n%s", diff) + } + tearDownProcessFileTest() + }) + } +} + +const testDataDir = "testdata" + +func testModules(m string) []LocalTerraformModule { + root := LocalTerraformModule{m, getAbsPathHelper(path.Join(testDataDir, m)), path.Join(moduleRegistryPrefix, m, moduleRegistrySuffix)} + return append(findSubModules(path.Join(testDataDir, m, "modules"), path.Join(moduleRegistryPrefix, m, moduleRegistrySuffix)), root) +} + +func getTempDir() string { + d, err := os.MkdirTemp("", "gitrmtest") + if err != nil { + log.Fatalf("Error creating tempdir: %v", err) + } + return d +} + +func tempGitRepoWithRemote(repoURL, remote string) string { + dir := getTempDir() + r, err := git.PlainInit(dir, true) + if err != nil { + log.Fatalf("Error creating repo in tempdir: %v", err) + } + _, err = r.CreateRemote(&config.RemoteConfig{ + Name: remote, + URLs: []string{repoURL}, + }) + if err != nil { + log.Fatalf("Error creating remote in tempdir repo: %v", err) + } + return dir +} + +func Test_getModuleNameRegistry(t *testing.T) { + type args struct { + dir string + } + tests := []struct { + name string + args args + want string + want1 string + wantErr bool + wantErrStr string + }{ + {"simple-https", args{tempGitRepoWithRemote("https://github.com/foo/terraform-google-bar", "origin")}, "bar", "foo", false, ""}, + {"simple-git", args{tempGitRepoWithRemote("git@github.com:foo/terraform-google-bar.git", "origin")}, "bar", "foo", false, ""}, + {"simple-with-trailing-slash", args{tempGitRepoWithRemote("https://github.com/foo/terraform-google-bar/", "origin")}, "bar", "foo", false, ""}, + {"simple-with-trailing-git", args{tempGitRepoWithRemote("https://github.com/foo/terraform-google-bar.git", "origin")}, 
"bar", "foo", false, ""}, + {"err-no-remote-origin", args{tempGitRepoWithRemote("https://github.com/foo/terraform-google-bar", "foo")}, "", "", true, ""}, + {"err-not-git-repo", args{getTempDir()}, "", "", true, ""}, + {"err-not-github-repo-https", args{tempGitRepoWithRemote("https://gitlab.com/foo/terraform-google-bar", "origin")}, "", "", true, "expected GitHub remote, got: https://gitlab.com/foo/terraform-google-bar"}, + {"err-not-github-repo-ssh", args{tempGitRepoWithRemote("git@gitlab.com:foo/terraform-google-bar.git", "origin")}, "", "", true, "expected GitHub remote, got: git@gitlab.com:foo/terraform-google-bar.git"}, + {"err-not-prefixed-repo", args{tempGitRepoWithRemote("https://github.com/foo/bar", "origin")}, "", "", true, "expected to find repo name prefixed with terraform-google-"}, + {"err-malformed-remote", args{tempGitRepoWithRemote("https://github.com/footerraform-google-bar", "origin")}, "", "", true, "expected GitHub remote of form https://github.com/ModuleRegistry/ModuleRepo"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := getModuleNameRegistry(tt.args.dir) + if (err != nil) != tt.wantErr { + t.Errorf("getModuleNameRegistry() error = %v, wantErr %v", err, tt.wantErr) + return + } else { + if tt.wantErrStr != "" { + if !strings.Contains(err.Error(), tt.wantErrStr) { + t.Errorf("getModuleNameRegistry() error = %v, expected to contain %v", err, tt.wantErrStr) + } + } + } + if got != tt.want { + t.Errorf("getModuleNameRegistry() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("getModuleNameRegistry() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} diff --git a/infra/module-swapper/cmd/testdata/example-module-simple/Readme.md b/infra/module-swapper/cmd/testdata/example-module-simple/Readme.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/infra/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf 
b/infra/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf new file mode 100644 index 00000000000..832453111fe --- /dev/null +++ b/infra/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf @@ -0,0 +1,65 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +provider "google" { + version = ">= 3.0" +} + +# [START cloudrouter_create] +module "cloud_router" { + source = "terraform-google-modules/example-module-simple/google" + version = "~> 0.4" + + name = "my-router" + region = "us-central1" + + bgp = { + # The ASN (16550, 64512 - 65534, 4200000000 - 4294967294) can be any private ASN + # not already used as a peer ASN in the same region and network or 16550 for Partner Interconnect. 
+ asn = "65001" + } + + # project = "my-project-id" + project = var.project + # network = "my-network" + network = var.network +} +# [END cloudrouter_create] + +# Unrelated module +module "vpc" { + source = "terraform-google-modules/network/google" + version = "~> 2.0.0" + network_name = "example-vpc" + + routes = [ + { + name = "egress-internet" + description = "route through IGW to access internet" + destination_range = "0.0.0.0/0" + tags = "egress-inet" + next_hop_internet = "true" + }, + { + name = "app-proxy" + description = "route through proxy to reach app" + destination_range = "10.50.10.0/24" + tags = "app-proxy" + next_hop_instance = "app-proxy-instance" + next_hop_instance_zone = "us-west1-a" + }, + ] +} diff --git a/infra/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf.local b/infra/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf.local new file mode 100644 index 00000000000..54292f342b1 --- /dev/null +++ b/infra/module-swapper/cmd/testdata/example-module-simple/examples/example-one/main.tf.local @@ -0,0 +1,65 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +provider "google" { + version = ">= 3.0" +} + +# [START cloudrouter_create] +module "cloud_router" { + source = "../.." 
+ # [restore-marker] version = "~> 0.4" + + name = "my-router" + region = "us-central1" + + bgp = { + # The ASN (16550, 64512 - 65534, 4200000000 - 4294967294) can be any private ASN + # not already used as a peer ASN in the same region and network or 16550 for Partner Interconnect. + asn = "65001" + } + + # project = "my-project-id" + project = var.project + # network = "my-network" + network = var.network +} +# [END cloudrouter_create] + +# Unrelated module +module "vpc" { + source = "terraform-google-modules/network/google" + version = "~> 2.0.0" + network_name = "example-vpc" + + routes = [ + { + name = "egress-internet" + description = "route through IGW to access internet" + destination_range = "0.0.0.0/0" + tags = "egress-inet" + next_hop_internet = "true" + }, + { + name = "app-proxy" + description = "route through proxy to reach app" + destination_range = "10.50.10.0/24" + tags = "app-proxy" + next_hop_instance = "app-proxy-instance" + next_hop_instance_zone = "us-west1-a" + }, + ] +} diff --git a/infra/module-swapper/cmd/testdata/example-module-simple/examples/main.tf b/infra/module-swapper/cmd/testdata/example-module-simple/examples/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/infra/module-swapper/cmd/testdata/example-module-with-submodules/docs/Readme.md b/infra/module-swapper/cmd/testdata/example-module-with-submodules/docs/Readme.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-one/main.tf b/infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-one/main.tf similarity index 100% rename from infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-one/main.tf rename to infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-one/main.tf diff --git 
a/infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-one/main.tf.local b/infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-one/main.tf.local new file mode 100644 index 00000000000..c47dbc91b80 --- /dev/null +++ b/infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-one/main.tf.local @@ -0,0 +1,27 @@ +module "test-module" { + source = "../.." + # [restore-marker] version = "~> 3.2.0" + + project_id = var.project_id # Replace this with your project ID in quotes + network_name = "my-custom-mode-network" + mtu = 1460 +} + +module "test-submodule-module" { + source = "../../modules/bar-module" + # [restore-marker] version = "~> 3.2.0" + + project_id = var.project_id # Replace this with your project ID in quotes + network_name = "my-custom-mode-network" + mtu = 1460 +} + +# Unrelated submodule +module "test-unrelated-submodule-module" { + source = "terraform-google-modules/foo/google" + version = "~> 3.2.0" + + project_id = var.project_id # Replace this with your project ID in quotes + network_name = "my-custom-mode-network" + mtu = 1460 +} diff --git a/infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-two/main.tf b/infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-two/main.tf similarity index 100% rename from infra/build/developer-tools/build/scripts/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-two/main.tf rename to infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-two/main.tf diff --git a/infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-two/main.tf.local b/infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-two/main.tf.local new file mode 100644 index 00000000000..c94df85f271 --- /dev/null +++ 
b/infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/example-two/main.tf.local @@ -0,0 +1,46 @@ +# Unrelated module +module "test-unrelated-submodule" { + source = "terraform-google-modules/foo/google" + version = "~> 3.2.0" + + project_id = var.project_id # Replace this with your project ID in quotes + network_name = "my-custom-mode-network" + mtu = 1460 +} + +module "test-module" { + source = "../.." + # [restore-marker] version = "~> 3.2.0" + + project_id = var.project_id # Replace this with your project ID in quotes + network_name = "my-custom-mode-network" + mtu = 1460 +} + +module "test-submodule-1" { + source = "../../modules/bar-module" + # [restore-marker] version = "~> 3.2.0" + + project_id = var.project_id # Replace this with your project ID in quotes + network_name = "my-custom-mode-network" + mtu = 1460 +} + +module "test-submodule-2" { + source = "../../modules/foo-module" + # [restore-marker] version = "~> 3.2.0" + + project_id = var.project_id # Replace this with your project ID in quotes + network_name = "my-custom-mode-network" + mtu = 1460 +} + +# Unrelated submodule +module "test-unrelated-submodule" { + source = "terraform-google-modules/foo/google//modules/bar-module" + version = "~> 3.2.0" + + project_id = var.project_id # Replace this with your project ID in quotes + network_name = "my-custom-mode-network" + mtu = 1460 +} diff --git a/infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/main.tf b/infra/module-swapper/cmd/testdata/example-module-with-submodules/examples/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/infra/module-swapper/cmd/testdata/example-module-with-submodules/modules/bar-module/main.tf b/infra/module-swapper/cmd/testdata/example-module-with-submodules/modules/bar-module/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/infra/module-swapper/cmd/testdata/example-module-with-submodules/modules/foo-module/main.tf 
b/infra/module-swapper/cmd/testdata/example-module-with-submodules/modules/foo-module/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/infra/module-swapper/go.mod b/infra/module-swapper/go.mod new file mode 100644 index 00000000000..8e559b4ffa0 --- /dev/null +++ b/infra/module-swapper/go.mod @@ -0,0 +1,41 @@ +module github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/module-swapper + +go 1.22 + +require ( + github.com/chainguard-dev/git-urls v1.0.2 + github.com/go-git/go-git/v5 v5.13.1 + github.com/google/go-cmp v0.6.0 + github.com/hashicorp/hcl/v2 v2.23.0 + github.com/pmezard/go-difflib v1.0.0 + github.com/zclconf/go-cty v1.15.1 +) + +require ( + dario.cat/mergo v1.0.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/ProtonMail/go-crypto v1.1.3 // indirect + github.com/agext/levenshtein v1.2.1 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/cyphar/filepath-securejoin v0.3.6 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.6.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/skeema/knownhosts v1.3.0 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools 
v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect +) diff --git a/infra/module-swapper/go.sum b/infra/module-swapper/go.sum new file mode 100644 index 00000000000..6424aa17387 --- /dev/null +++ b/infra/module-swapper/go.sum @@ -0,0 +1,124 @@ +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= +github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/chainguard-dev/git-urls v1.0.2 h1:pSpT7ifrpc5X55n4aTTm7FFUE+ZQHKiqpiwNkJrVcKQ= +github.com/chainguard-dev/git-urls v1.0.2/go.mod h1:rbGgj10OS7UgZlbzdUQIQpT0k/D4+An04HJY7Ol+Y/o= +github.com/cloudflare/circl v1.3.7 
h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= +github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/elazarl/goproxy v1.2.3 h1:xwIyKHbaP5yfT6O9KIeYJR5549MXRQkoQMRXGztz8YQ= +github.com/elazarl/goproxy v1.2.3/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= +github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= +github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= +github.com/go-test/deep v1.0.3 
h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= +github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/onsi/gomega v1.34.1 
h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= +github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/zclconf/go-cty v1.15.1 
h1:RgQYm4j2EvoBRXOPxhUvxPzRrGDo1eCOhHXuGfrj5S0= +github.com/zclconf/go-cty v1.15.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git 
a/infra/module-swapper/main.go b/infra/module-swapper/main.go new file mode 100644 index 00000000000..d072d3c202a --- /dev/null +++ b/infra/module-swapper/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "flag" + "log" + "os" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/module-swapper/cmd" +) + +func main() { + workDir := flag.String("workdir", "", "Absolute path to root module where examples should be swapped. Defaults to working directory") + subModulesDir := flag.String("submods-path", "modules", "Path to a submodules if any that maybe referenced. Defaults to working dir/modules") + examplesDir := flag.String("examples-path", "examples", "Path to examples that should be swapped. Defaults to cwd/examples") + moduleRegistrySuffix := flag.String("registry-suffix", "google", "Module registry suffix") + moduleRegistryPrefix := flag.String("registry-prefix", "", "Module registry prefix") + restore := flag.Bool("restore", false, "Restores disabled modules") + flag.Parse() + rootPath := *workDir + // if no workDir specified default to current working directory + if rootPath == "" { + cwd, err := os.Getwd() + if err != nil { + log.Fatalf("Unable to get cwd: %v", err) + } + rootPath = cwd + } + cmd.SwapModules(rootPath, *moduleRegistrySuffix, *moduleRegistryPrefix, *subModulesDir, *examplesDir, *restore) +} diff --git a/infra/modules/real_time_enforcer_roles/main.tf b/infra/modules/real_time_enforcer_roles/main.tf index ddbd02cf7b4..c5d2bc48158 100644 --- a/infra/modules/real_time_enforcer_roles/main.tf +++ b/infra/modules/real_time_enforcer_roles/main.tf @@ -58,7 +58,7 @@ resource "google_organization_iam_custom_role" "forseti-enforcer-writer" { resource "random_id" "prevent_destroy" { count = "${var.prevent_destroy ? 
1 : 0}" byte_length = 8 - keepers { + keepers = { viewer = "${google_organization_iam_custom_role.forseti-enforcer-viewer.role_id}" writer = "${google_organization_iam_custom_role.forseti-enforcer-writer.role_id}" } diff --git a/infra/modules/real_time_enforcer_roles/versions.tf b/infra/modules/real_time_enforcer_roles/versions.tf new file mode 100644 index 00000000000..c19009fe619 --- /dev/null +++ b/infra/modules/real_time_enforcer_roles/versions.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +terraform { + required_version = ">= 0.13.7" +} diff --git a/infra/modules/seed_project/main.tf b/infra/modules/seed_project/main.tf index a22abe762ad..a4cc6b4d9dc 100644 --- a/infra/modules/seed_project/main.tf +++ b/infra/modules/seed_project/main.tf @@ -27,7 +27,7 @@ locals { module "project_factory" { source = "terraform-google-modules/project-factory/google" - version = "~> 2.0" + version = "~> 14.0" random_project_id = "true" name = "${var.username}-seed" diff --git a/infra/modules/seed_project/variables.tf b/infra/modules/seed_project/variables.tf index fd39f5f441e..13748608850 100644 --- a/infra/modules/seed_project/variables.tf +++ b/infra/modules/seed_project/variables.tf @@ -4,7 +4,7 @@ variable "username" { variable "owner_emails" { description = "A list of identities to add as owners on the project in the member format described here: https://cloud.google.com/iam/docs/overview#iam_policy" - type = "list" + type = list } variable "seed_project_services" { @@ -22,7 +22,7 @@ variable "seed_project_services" { "serviceusage.googleapis.com", ] - type = "list" + type = list } variable "seed_project_roles" { @@ -36,7 +36,7 @@ variable "seed_project_roles" { "roles/resourcemanager.projectIamAdmin", ] - type = "list" + type = list } variable "org_id" { diff --git a/infra/modules/seed_project/versions.tf b/infra/modules/seed_project/versions.tf new file mode 100644 index 00000000000..c19009fe619 --- /dev/null +++ b/infra/modules/seed_project/versions.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_version = ">= 0.13.7" +} diff --git a/infra/terraform/dev-org/dev-project-cleanup/.terraform.lock.hcl b/infra/terraform/dev-org/dev-project-cleanup/.terraform.lock.hcl new file mode 100644 index 00000000000..1abacd864d8 --- /dev/null +++ b/infra/terraform/dev-org/dev-project-cleanup/.terraform.lock.hcl @@ -0,0 +1,122 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/archive" { + version = "2.6.0" + constraints = ">= 1.2.0, >= 1.3.0, < 3.0.0" + hashes = [ + "h1:rYAubRk7UHC/fzYqFV/VHc+7VIY01ugCxauyTYCNf9E=", + "zh:29273484f7423b7c5b3f5df34ccfc53e52bb5e3d7f46a81b65908e7a8fd69072", + "zh:3cba58ec3aea5f301caf2acc31e184c55d994cc648126cac39c63ae509a14179", + "zh:55170cd17dbfdea842852c6ae2416d057fec631ba49f3bb6466a7268cd39130e", + "zh:7197db402ba35631930c3a4814520f0ebe980ae3acb7f8b5a6f70ec90dc4a388", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:8bf7fe0915d7fb152a3a6b9162614d2ec82749a06dba13fab3f98d33c020ec4f", + "zh:8ce811844fd53adb0dabc9a541f8cb43aacfa7d8e39324e4bd3592b3428f5bfb", + "zh:bca795bca815b8ac90e3054c0a9ab1ccfb16eedbb3418f8ad473fc5ad6bf0ef7", + "zh:d9355a18df5a36cf19580748b23249de2eb445c231c36a353709f8f40a6c8432", + "zh:dc32cc32cfd8abf8752d34f2a783de0d3f7200c573b885ecb64ece5acea173b4", + "zh:ef498e20391bf7a280d0fd6fd6675621c85fbe4e92f0f517ae4394747db89bde", + "zh:f2bc5226c765b0c8055a7b6207d0fe1eb9484e3ec8880649d158827ac6ed3b22", + ] +} + +provider "registry.terraform.io/hashicorp/google" { + version = "6.6.0" + constraints = ">= 3.35.0, >= 3.43.0, >= 3.53.0, >= 4.23.0, >= 4.28.0, >= 5.31.0, >= 5.41.0, < 7.0.0" + hashes = [ + "h1:mllWOZFO8u2kD2kRTdDDAa8Jt+vb8Uxhf6C0lwLxoz8=", + "zh:0c181f9b9f0ab81731e5c4c2d20b6d342244506687437dad94e279ef2a588f68", + 
"zh:12a4c333fc0ba670e87f09eb574e4b7da90381f9929ef7c866048b6841cc8a6a", + "zh:15c277c2052df89429051350df4bccabe4cf46068433d4d8c673820d9756fc00", + "zh:35d1663c81b81cd98d768fa7b80874b48c51b27c036a3c598a597f653374d3c8", + "zh:56b268389758d544722a342da4174c486a40ffa2a49b45a06111fe31c6c9c867", + "zh:abd3ea8c3a62928ba09ba7eb42b52f53e682bd65e92d573f1739596b5a9a67b1", + "zh:be55a328d61d9db58690db74ed43614111e1105e5e52cee15acaa062df4e233e", + "zh:ce2317ce9fd02cf14323f9e061c43a415b4ae9b3f96046460d0e6b6529a5aa6c", + "zh:d54a6d8e031c824f1de21b93c3e01ed7fec134b4ae55223d08868c6168c98e47", + "zh:d8c6e33b5467c6eb5a970adb251c4c8194af12db5388cff9d4b250294eae4daa", + "zh:f49e4cc9c0b55b3bec7da64dd698298345634a5df372228ee12aa45e57982f64", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/google-beta" { + version = "6.6.0" + constraints = ">= 3.35.0, >= 3.43.0, >= 4.11.0, >= 5.41.0, < 7.0.0" + hashes = [ + "h1:B4Wrkju7TTLWlCIDh+Vh4knFQS3wmFm0NHICGjCNO3k=", + "zh:1bf8f840a9a4ac1e120a6155225a0defbfa7f07b19c9bb37b45f95006b020ccf", + "zh:39077f037e611bdbd6af42e51b2881ea03d62ad55f21b42f90dc09e2cf812753", + "zh:64313dd2158749e3a4f2759edb896a9efa2c2afc59feb38d3af57e31eaa64480", + "zh:6bec8b21a20f50d81ca2e633cdaf1144bb8615a1dedf50e87c86f4eda3467b05", + "zh:74566c568410997fe966ef44130d19d640dbb427ffec3de93f0fd2affeb6fd8f", + "zh:8fe1c42d3229d0fe64961b7fa480689408eff3e5be62eb108d6aa9d36a10a769", + "zh:9593b59efd271623f45d133164eae16676130439727a625c10d3b929d2f28671", + "zh:a72c71431523d1f0d0d8baf7141ff16aa5938c0edf27e05dc4d1dc3455a50d01", + "zh:cbc96a215575d94601ec315a2db8802b521e904aaecf2602bce0110786cfa81f", + "zh:e71c3e06e861d5c9d1782b0bbaef93b5b9defa926304f90ddd22bc9b69ee14bd", + "zh:f559eefcc67d771ce0157e7ec021c1025b8af2a8c7ca89e1f5ac812e16bf9760", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/null" { + version = "3.2.3" + constraints = ">= 
2.1.0, < 4.0.0" + hashes = [ + "h1:+AnORRgFbRO6qqcfaQyeX80W0eX3VmjadjnUFUJTiXo=", + "zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2", + "zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d", + "zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3", + "zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f", + "zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301", + "zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670", + "zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed", + "zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65", + "zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd", + "zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.6.3" + constraints = ">= 2.1.0, >= 2.2.0, < 4.0.0" + hashes = [ + "h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=", + "zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451", + "zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8", + "zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe", + "zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1", + "zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36", + "zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30", + "zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615", + "zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad", + "zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556", + 
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0", + ] +} + +provider "registry.terraform.io/hashicorp/time" { + version = "0.12.1" + constraints = ">= 0.5.0" + hashes = [ + "h1:6BhxSYBJdBBKyuqatOGkuPKVenfx6UmLdiI13Pb3his=", + "zh:090023137df8effe8804e81c65f636dadf8f9d35b79c3afff282d39367ba44b2", + "zh:26f1e458358ba55f6558613f1427dcfa6ae2be5119b722d0b3adb27cd001efea", + "zh:272ccc73a03384b72b964918c7afeb22c2e6be22460d92b150aaf28f29a7d511", + "zh:438b8c74f5ed62fe921bd1078abe628a6675e44912933100ea4fa26863e340e9", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:85c8bd8eefc4afc33445de2ee7fbf33a7807bc34eb3734b8eefa4e98e4cddf38", + "zh:98bbe309c9ff5b2352de6a047e0ec6c7e3764b4ed3dfd370839c4be2fbfff869", + "zh:9c7bf8c56da1b124e0e2f3210a1915e778bab2be924481af684695b52672891e", + "zh:d2200f7f6ab8ecb8373cda796b864ad4867f5c255cff9d3b032f666e4c78f625", + "zh:d8c7926feaddfdc08d5ebb41b03445166df8c125417b28d64712dccd9feef136", + "zh:e2412a192fc340c61b373d6c20c9d805d7d3dee6c720c34db23c2a8ff0abd71b", + "zh:e6ac6bba391afe728a099df344dbd6481425b06d61697522017b8f7a59957d44", + ] +} diff --git a/infra/terraform/dev-org/dev-project-cleanup/backend.tf b/infra/terraform/dev-org/dev-project-cleanup/backend.tf index 88d944223c9..fa9233c8776 100644 --- a/infra/terraform/dev-org/dev-project-cleanup/backend.tf +++ b/infra/terraform/dev-org/dev-project-cleanup/backend.tf @@ -20,4 +20,3 @@ terraform { prefix = "state/test-cleanup" } } - diff --git a/infra/terraform/dev-org/dev-project-cleanup/cleanup.tf b/infra/terraform/dev-org/dev-project-cleanup/cleanup.tf index 31a86fcaab1..e8087d7e2db 100644 --- a/infra/terraform/dev-org/dev-project-cleanup/cleanup.tf +++ b/infra/terraform/dev-org/dev-project-cleanup/cleanup.tf @@ -16,14 +16,14 @@ module "app-engine" { source = "terraform-google-modules/project-factory/google//modules/app_engine" - version = "~> 9.0" + version = "~> 17.0" location_id = local.app_location project_id = module.project.project_id 
} module "projects_cleanup" { - source = "terraform-google-modules/scheduled-function/google//modules/project_cleanup" - version = "~> 1.5.1" + source = "terraform-google-modules/scheduled-function/google//modules/project_cleanup" + version = "~> 6.0" job_schedule = "17 * * * *" max_project_age_in_hours = "24" @@ -32,5 +32,5 @@ module "projects_cleanup" { region = local.region target_excluded_labels = local.exclude_labels target_folder_id = local.cleanup_folder + function_docker_registry = "ARTIFACT_REGISTRY" } - diff --git a/infra/terraform/dev-org/dev-project-cleanup/locals.tf b/infra/terraform/dev-org/dev-project-cleanup/locals.tf index da34e198b3e..32e59fcbfb2 100644 --- a/infra/terraform/dev-org/dev-project-cleanup/locals.tf +++ b/infra/terraform/dev-org/dev-project-cleanup/locals.tf @@ -22,4 +22,3 @@ locals { region = "us-central1" app_location = "us-central" } - diff --git a/infra/terraform/dev-org/dev-project-cleanup/outputs.tf b/infra/terraform/dev-org/dev-project-cleanup/outputs.tf index d77f03cdfaf..8892f5f348b 100644 --- a/infra/terraform/dev-org/dev-project-cleanup/outputs.tf +++ b/infra/terraform/dev-org/dev-project-cleanup/outputs.tf @@ -21,4 +21,3 @@ output "project_id" { output "excluded_labels" { value = local.exclude_labels } - diff --git a/infra/terraform/dev-org/dev-project-cleanup/project.tf b/infra/terraform/dev-org/dev-project-cleanup/project.tf index a448b5f39dd..b11a1f8adc1 100644 --- a/infra/terraform/dev-org/dev-project-cleanup/project.tf +++ b/infra/terraform/dev-org/dev-project-cleanup/project.tf @@ -21,16 +21,14 @@ resource "google_folder" "cft-dev-management" { module "project" { source = "terraform-google-modules/project-factory/google" - version = "~> 9.0" - - name = "cft-project-manager" - random_project_id = true - org_id = local.org_id - folder_id = google_folder.cft-dev-management.id - billing_account = local.billing_account - labels = local.exclude_labels - skip_gcloud_download = true + version = "~> 17.0" + name = 
"cft-project-manager" + random_project_id = true + org_id = local.org_id + folder_id = google_folder.cft-dev-management.id + billing_account = local.billing_account + labels = local.exclude_labels activate_apis = [ "cloudresourcemanager.googleapis.com", diff --git a/infra/terraform/dev-org/dev-project-cleanup/versions.tf b/infra/terraform/dev-org/dev-project-cleanup/versions.tf index b2084533b11..e737ef3f131 100644 --- a/infra/terraform/dev-org/dev-project-cleanup/versions.tf +++ b/infra/terraform/dev-org/dev-project-cleanup/versions.tf @@ -15,25 +15,27 @@ */ terraform { - required_version = ">= 0.12" -} - -provider "archive" { - version = "~> 1.3" -} - -provider "google" { - version = "~> 3.35" -} - -provider "google-beta" { - version = "~> 3.35" -} - -provider "null" { - version = "~> 2.1" -} - -provider "random" { - version = "~> 2.2" + required_version = ">= 1.4.4" + required_providers { + archive = { + source = "hashicorp/archive" + version = ">= 1.3, < 3" + } + google = { + source = "hashicorp/google" + version = ">= 3.35, < 7" + } + google-beta = { + source = "hashicorp/google-beta" + version = ">= 3.35, < 7" + } + null = { + source = "hashicorp/null" + version = ">= 2.1, < 4" + } + random = { + source = "hashicorp/random" + version = ">= 2.2, < 4" + } + } } diff --git a/infra/terraform/modules/branch_protection/main.tf b/infra/terraform/modules/branch_protection/main.tf new file mode 100644 index 00000000000..9541c330a08 --- /dev/null +++ b/infra/terraform/modules/branch_protection/main.tf @@ -0,0 +1,51 @@ +/** + * Copyright 2022-2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +resource "github_branch_protection" "default" { + for_each = var.repo_list + repository_id = each.value.node_id + pattern = each.value.default_branch + + required_pull_request_reviews { + required_approving_review_count = 1 + require_code_owner_reviews = true + pull_request_bypassers = setunion( + [var.admin], + formatlist("/%s", lookup(var.repos_map[each.key], "admins", [])) + ) + } + + required_status_checks { + strict = true + contexts = [ + "cla/google", + "${each.key}-int-trigger (cloud-foundation-cicd)", + "lint", + "conventionalcommits.org" + ] + } + + enforce_admins = false + + restrict_pushes { + push_allowances = setunion( + [var.admin], + formatlist("/%s", setunion(lookup(var.repos_map[each.key], "admins", []), lookup(var.repos_map[each.key], "maintainers", []))), + formatlist("${var.repos_map[each.key].org}/%s", lookup(var.repos_map[each.key], "groups", [])) + ) + blocks_creations = false + } +} diff --git a/infra/terraform/modules/branch_protection/variables.tf b/infra/terraform/modules/branch_protection/variables.tf new file mode 100644 index 00000000000..ca11a343195 --- /dev/null +++ b/infra/terraform/modules/branch_protection/variables.tf @@ -0,0 +1,40 @@ +/** + * Copyright 2022-2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/****************************************** + Required variables +*******************************************/ + +variable "repo_list" { + description = "List of Repos" + type = map(any) +} + +variable "repos_map" { + description = "Map of Repos" + type = map(object({ + name = string + org = string + admins = optional(list(string), []) + maintainers = optional(list(string), []) + groups = optional(list(string), []) + })) +} + +variable "admin" { + description = "GitHub Admin" + type = string +} diff --git a/infra/terraform/modules/branch_protection/versions.tf b/infra/terraform/modules/branch_protection/versions.tf new file mode 100644 index 00000000000..9fe5d28d303 --- /dev/null +++ b/infra/terraform/modules/branch_protection/versions.tf @@ -0,0 +1,24 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +terraform { + required_providers { + github = { + source = "integrations/github" + version = "~> 6.0" + } + } +} diff --git a/infra/terraform/modules/codeowners_file/main.tf b/infra/terraform/modules/codeowners_file/main.tf new file mode 100644 index 00000000000..10666aac24d --- /dev/null +++ b/infra/terraform/modules/codeowners_file/main.tf @@ -0,0 +1,38 @@ +/** + * Copyright 2022-2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + commit_author = "CFT Bot" + commit_email = "cloud-foundation-bot@google.com" + owners = { for value in var.repos_map : value.name => "${join(" ", formatlist("@%s", sort(setunion(lookup(value, "admins", []), lookup(value, "maintainers", [])))))} " if length(setunion(lookup(value, "admins", []), lookup(value, "maintainers", []))) > 0 } + groups = { for value in var.repos_map : value.name => "${join(" ", formatlist("@${value.org}/%s", value.groups))} " if length(value.groups) > 0 } + header = "# NOTE: This file is automatically generated from values at:\n# https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/org/locals.tf\n" + footer_prefix = "# NOTE: GitHub CODEOWNERS locations:\n# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners#codeowners-and-branch-protection\n" + footer_code = "CODEOWNERS @${var.org}/${var.owner}\n.github/CODEOWNERS 
@${var.org}/${var.owner}\ndocs/CODEOWNERS @${var.org}/${var.owner}\n" + footer = "\n${local.footer_prefix}\n${local.footer_code}\n" +} + +resource "github_repository_file" "CODEOWNERS" { + for_each = var.repo_list + repository = each.key + branch = each.value.default_branch + file = "CODEOWNERS" + commit_message = "chore: update CODEOWNERS" + commit_author = local.commit_author + commit_email = local.commit_email + overwrite_on_create = true + content = "${trimspace("${local.header}\n* @${var.org}/${var.owner} ${try(local.owners[each.key], "")}${try(local.groups[each.key], "")}")}\n${local.footer}" +} diff --git a/infra/terraform/modules/codeowners_file/variables.tf b/infra/terraform/modules/codeowners_file/variables.tf new file mode 100644 index 00000000000..bc53217c3b9 --- /dev/null +++ b/infra/terraform/modules/codeowners_file/variables.tf @@ -0,0 +1,46 @@ +/** + * Copyright 2022-2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/****************************************** + Required variables +*******************************************/ + +variable "org" { + description = "GitHub Org" + type = string +} + +variable "owner" { + description = "Primary owner" + type = string + nullable = false +} + +variable "repos_map" { + description = "Map of Repos" + type = map(object({ + name = string + org = string + admins = optional(list(string), []) + maintainers = optional(list(string), []) + groups = optional(list(string), []) + })) +} + +variable "repo_list" { + description = "List of Repos" + type = map(any) +} diff --git a/infra/terraform/modules/codeowners_file/versions.tf b/infra/terraform/modules/codeowners_file/versions.tf new file mode 100644 index 00000000000..9fe5d28d303 --- /dev/null +++ b/infra/terraform/modules/codeowners_file/versions.tf @@ -0,0 +1,24 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_providers { + github = { + source = "integrations/github" + version = "~> 6.0" + } + } +} diff --git a/infra/terraform/modules/repo_file/main.tf b/infra/terraform/modules/repo_file/main.tf new file mode 100644 index 00000000000..0a8f3b52869 --- /dev/null +++ b/infra/terraform/modules/repo_file/main.tf @@ -0,0 +1,32 @@ +/** + * Copyright 2022-2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + commit_author = "CFT Bot" + commit_email = "cloud-foundation-bot@google.com" +} + +resource "github_repository_file" "file" { + for_each = var.repo_list + repository = each.key + branch = each.value.default_branch + file = var.filename + commit_message = "chore: update ${var.filename}" + commit_author = local.commit_author + commit_email = local.commit_email + overwrite_on_create = true + content = var.content +} diff --git a/infra/terraform/modules/repo_file/variables.tf b/infra/terraform/modules/repo_file/variables.tf new file mode 100644 index 00000000000..01fb28ed7bb --- /dev/null +++ b/infra/terraform/modules/repo_file/variables.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2022-2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/****************************************** + Required variables +*******************************************/ + +variable "repo_list" { + description = "List of Repos" + type = map(any) +} + +variable "filename" { + description = "Filename" + type = string +} + +variable "content" { + description = "File content" + type = string +} diff --git a/infra/terraform/modules/repo_file/versions.tf b/infra/terraform/modules/repo_file/versions.tf new file mode 100644 index 00000000000..9fe5d28d303 --- /dev/null +++ b/infra/terraform/modules/repo_file/versions.tf @@ -0,0 +1,24 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_providers { + github = { + source = "integrations/github" + version = "~> 6.0" + } + } +} diff --git a/infra/terraform/modules/repo_labels/main.tf b/infra/terraform/modules/repo_labels/main.tf new file mode 100644 index 00000000000..22f443d7c27 --- /dev/null +++ b/infra/terraform/modules/repo_labels/main.tf @@ -0,0 +1,46 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +provider "github" { + owner = var.org +} + +locals { + repo_labels = { + for o in flatten([ + for repo in var.repo_list : + [ + for label in var.labels : + { + "repo" : repo, + "label" : label.name, + "color" : label.color, + "description" : label.description + } + ] + ]) : + "${o.repo}/${o.label}" => o + } +} + +# Create labels on all repos +resource "github_issue_label" "test_repo" { + for_each = local.repo_labels + repository = each.value.repo + name = each.value.label + color = each.value.color + description = each.value.description +} diff --git a/infra/terraform/modules/repo_labels/variables.tf b/infra/terraform/modules/repo_labels/variables.tf new file mode 100644 index 00000000000..456a9f2c7dc --- /dev/null +++ b/infra/terraform/modules/repo_labels/variables.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/****************************************** + Required variables +*******************************************/ + +variable "org" { + description = "GitHub Org" + type = string +} + +variable "repo_list" { + description = "List of Repos" + type = list(any) +} + +variable "labels" { + description = "Labels" + type = list(any) +} diff --git a/infra/terraform/modules/repo_labels/versions.tf b/infra/terraform/modules/repo_labels/versions.tf new file mode 100644 index 00000000000..9fe5d28d303 --- /dev/null +++ b/infra/terraform/modules/repo_labels/versions.tf @@ -0,0 +1,24 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_providers { + github = { + source = "integrations/github" + version = "~> 6.0" + } + } +} diff --git a/infra/terraform/modules/repositories/main.tf b/infra/terraform/modules/repositories/main.tf new file mode 100644 index 00000000000..01447628212 --- /dev/null +++ b/infra/terraform/modules/repositories/main.tf @@ -0,0 +1,125 @@ +/** + * Copyright 2022-2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + admins = flatten([ + for repo, val in var.repos_map : [ + for admin in val.admins : { + "repo" : repo + "admin" : admin + } + ] + ]) + + maintainers = flatten([ + for repo, val in var.repos_map : [ + for maintainer in val.maintainers : { + "repo" : repo + "maintainer" : maintainer + } + ] + ]) + + groups = flatten([ + for repo, val in var.repos_map : [ + for group in val.groups : { + "repo" : repo + "group" : group + } + ] + ]) + + teams = flatten([ + for repo, val in var.repos_map : [ + for team in var.ci_teams : { + "repo" : repo + "team" : team + } + ] + ]) + +} + +resource "github_repository" "repo" { + for_each = var.repos_map + name = each.value.name + description = each.value.description + homepage_url = coalesce(each.value.homepage_url, "https://registry.terraform.io/modules/${each.value.org}/${trimprefix(each.value.name, "terraform-google-")}/google") + topics = setunion(["cft-terraform"], try(split(",", trimspace(each.value.topics)), [])) + + allow_merge_commit = false + allow_rebase_merge = false + allow_update_branch = true + allow_auto_merge = true + delete_branch_on_merge = true + has_issues = true + has_projects = false + has_wiki = false + vulnerability_alerts = true + has_downloads = false + squash_merge_commit_message = "BLANK" + squash_merge_commit_title = "PR_TITLE" +} + +resource "github_repository_collaborator" "dpebot" { + for_each = github_repository.repo + repository = each.value.name + username = "dpebot" + permission = "pull" +} + +resource "github_repository_collaborator" "cftbot" { + for_each = 
github_repository.repo + repository = each.value.name + username = "cloud-foundation-bot" + permission = "admin" +} + +resource "github_repository_collaborator" "admins" { + for_each = { + for v in local.admins : "${v.repo}/${v.admin}" => v + } + repository = each.value.repo + username = each.value.admin + permission = "maintain" +} + +resource "github_repository_collaborator" "maintainers" { + for_each = { + for v in local.maintainers : "${v.repo}/${v.maintainer}" => v + } + repository = each.value.repo + username = each.value.maintainer + permission = "push" +} + +resource "github_team_repository" "groups" { + for_each = { + for v in local.groups : "${v.repo}/${v.group}" => v + } + repository = each.value.repo + team_id = each.value.group + permission = "push" +} + +resource "github_team_repository" "ci_teams" { + for_each = { + for v in local.teams : "${v.repo}/${v.team}" => v + } + repository = each.value.repo + team_id = each.value.team + permission = "push" +} diff --git a/infra/terraform/modules/repositories/outputs.tf b/infra/terraform/modules/repositories/outputs.tf new file mode 100644 index 00000000000..2fb4140f6a8 --- /dev/null +++ b/infra/terraform/modules/repositories/outputs.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2022-2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +output "repos" { + value = { for value in github_repository.repo : value.name => value } +} diff --git a/infra/terraform/modules/repositories/variables.tf b/infra/terraform/modules/repositories/variables.tf new file mode 100644 index 00000000000..9342f2f7ad7 --- /dev/null +++ b/infra/terraform/modules/repositories/variables.tf @@ -0,0 +1,41 @@ +/** + * Copyright 2022-2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/****************************************** + Required variables +*******************************************/ + +variable "repos_map" { + description = "Map of Repos" + type = map(object({ + name = string + short_name = optional(string) + org = string + description = optional(string) + admins = optional(list(string), []) + maintainers = optional(list(string), []) + homepage_url = optional(string, null) + module = optional(bool, true) + topics = optional(string) + groups = optional(list(string), []) + })) +} + +variable "ci_teams" { + description = "Repo collaborator teams for approved CI" + type = list(string) + default = [] +} diff --git a/infra/terraform/modules/repositories/versions.tf b/infra/terraform/modules/repositories/versions.tf new file mode 100644 index 00000000000..9fe5d28d303 --- /dev/null +++ b/infra/terraform/modules/repositories/versions.tf @@ -0,0 +1,24 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except 
in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_providers { + github = { + source = "integrations/github" + version = "~> 6.0" + } + } +} diff --git a/infra/terraform/modules/variables/main.tf b/infra/terraform/modules/variables/main.tf deleted file mode 100644 index aaeddcafec2..00000000000 --- a/infra/terraform/modules/variables/main.tf +++ /dev/null @@ -1,20 +0,0 @@ -variable "name_prefix" { - default = "cicd" - description = "Common prefix for naming resources such as networks and k8s clusters." -} - -variable "project_id" { - default = "cloud-foundation-cicd" - description = "ID of project where all CICD resources will be launched." -} - -variable "region" { - default = { - primary = "us-west1" - } - description = "GCP region to launch resources in. Keys should correspond to Terraform workspaces." 
-} - -variable "phoogle_billing_account" { - default = "01E8A0-35F760-5CF02A" -} diff --git a/infra/terraform/modules/variables/outputs.tf b/infra/terraform/modules/variables/outputs.tf deleted file mode 100644 index 98acb372f61..00000000000 --- a/infra/terraform/modules/variables/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "name_prefix" { value = "${var.name_prefix}" } -output "region" { value = "${var.region}" } -output "project_id" { value = "${var.project_id}" } -output "phoogle_billing_account" { value = "${var.phoogle_billing_account}" } diff --git a/infra/terraform/modules/workflow_files/lint.yaml.tftpl b/infra/terraform/modules/workflow_files/lint.yaml.tftpl new file mode 100644 index 00000000000..7f2fefb8935 --- /dev/null +++ b/infra/terraform/modules/workflow_files/lint.yaml.tftpl @@ -0,0 +1,56 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: This file is automatically generated from values at: +# https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/org/locals.tf + +name: 'lint' + +on: + workflow_dispatch: + pull_request: + branches: + - ${branch} + +concurrency: + group: '$${{ github.workflow }}-$${{ github.head_ref || github.ref }}' + cancel-in-progress: true + +jobs: + lint: + name: 'lint' + runs-on: 'ubuntu-latest' + steps: + - uses: 'actions/checkout@v4' + - id: variables + run: | + MAKEFILE=$(find . 
-name Makefile -print -quit) + if [ -z "$MAKEFILE" ]; then + echo dev-tools=gcr.io/cloud-foundation-cicd/cft/developer-tools:1 >> "$GITHUB_OUTPUT" + else + VERSION=$(grep "DOCKER_TAG_VERSION_DEVELOPER_TOOLS := " $MAKEFILE | cut -d\ -f3) + IMAGE=$(grep "DOCKER_IMAGE_DEVELOPER_TOOLS := " $MAKEFILE | cut -d\ -f3) + REGISTRY=$(grep "REGISTRY_URL := " $MAKEFILE | cut -d\ -f3) + echo dev-tools=$${REGISTRY}/$${IMAGE}:$${VERSION} >> "$GITHUB_OUTPUT" + fi + - run: docker run --rm %{if lint_env != null }%{ for key, value in lint_env ~}-e ${key} %{ endfor ~}%{ endif }-v $${{ github.workspace }}:/workspace $${{ steps.variables.outputs.dev-tools }} module-swapper%{if lint_env != null } + env: +%{ for key, value in lint_env ~} + ${key}: ${value} +%{ endfor ~}%{ endif } + - run: docker run --rm %{if lint_env != null }%{ for key, value in lint_env ~}-e ${key} %{ endfor ~}%{ endif }-v $${{ github.workspace }}:/workspace $${{ steps.variables.outputs.dev-tools }} /usr/local/bin/test_lint.sh%{if lint_env != null } + env: +%{ for key, value in lint_env ~} + ${key}: ${value} +%{ endfor ~}%{ endif } diff --git a/infra/terraform/modules/workflow_files/main.tf b/infra/terraform/modules/workflow_files/main.tf new file mode 100644 index 00000000000..b6cbc8d112a --- /dev/null +++ b/infra/terraform/modules/workflow_files/main.tf @@ -0,0 +1,44 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +locals { + commit_author = "CFT Bot" + commit_email = "cloud-foundation-bot@google.com" +} + +resource "github_repository_file" "file" { + for_each = { for k, v in var.repo_list : k => v if var.repos_map[k].disable_lint_yaml != true } + repository = each.key + branch = each.value.default_branch + file = ".github/workflows/lint.yaml" + commit_message = "chore: update .github/workflows/lint.yaml" + commit_author = local.commit_author + commit_email = local.commit_email + overwrite_on_create = true + content = templatefile("${path.module}/lint.yaml.tftpl", { branch = each.value.default_branch, lint_env = var.repos_map[each.value.name].lint_env }) +} + +resource "github_repository_file" "reporter" { + for_each = { for k, v in var.repo_list : k => v if var.repos_map[k].enable_periodic == true } + repository = each.key + branch = each.value.default_branch + file = ".github/workflows/periodic-reporter.yaml" + commit_message = "chore: update .github/workflows/periodic-reporter.yaml" + commit_author = local.commit_author + commit_email = local.commit_email + overwrite_on_create = true + content = file("${path.module}/periodic-reporter.yaml") +} diff --git a/infra/terraform/modules/workflow_files/periodic-reporter.yaml b/infra/terraform/modules/workflow_files/periodic-reporter.yaml new file mode 100644 index 00000000000..82ec8355c7d --- /dev/null +++ b/infra/terraform/modules/workflow_files/periodic-reporter.yaml @@ -0,0 +1,159 @@ +# Copyright 2023-2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: This file is automatically generated from: +# https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/modules/workflow_files/periodic-reporter.yaml + +name: 'reporter' + +on: + schedule: + # 2 hours after scheduled periodic and once again in the evening + - cron: '0 5,17 * * *' + workflow_dispatch: + +jobs: + report: + if: github.repository_owner == 'GoogleCloudPlatform' || github.repository_owner == 'terraform-google-modules' + + permissions: + issues: 'write' + + runs-on: 'ubuntu-latest' + + steps: + - uses: 'actions/github-script@v7' + with: + script: |- + // label for all issues opened by reporter + const periodicLabel = 'periodic-failure'; + + // check if any reporter opened any issues previously + const prevIssues = await github.paginate(github.rest.issues.listForRepo, { + ...context.repo, + state: 'open', + creator: 'github-actions[bot]', + labels: [periodicLabel] + }); + // createOrCommentIssue creates a new issue or comments on an existing issue. + const createOrCommentIssue = async function (title, txt) { + if (prevIssues.length < 1) { + console.log('no previous issues found, creating one'); + await github.rest.issues.create({ + ...context.repo, + title: title, + body: txt, + labels: [periodicLabel] + }); + return; + } + if (prevIssues.length > 1) { + console.warn( + `found ${prevIssues.length} issues but only adding comment to ${prevIssues[0].html_url}` + ); + } + console.log( + `found previous issue ${prevIssues[0].html_url}, adding comment` + ); + await github.rest.issues.createComment({ + ...context.repo, + issue_number: prevIssues[0].number, + body: txt + }); + }; + + // updateAndCloseIssues comments on any existing issues and closes them. No-op if no issue exists. 
+ const updateAndCloseIssues = async function (txt) { + if (prevIssues.length < 1) { + console.log('no previous issues found, skipping close'); + return; + } + for (const prevIssue of prevIssues) { + console.log(`found previous issue ${prevIssue.html_url}, adding comment`); + await github.rest.issues.createComment({ + ...context.repo, + issue_number: prevIssue.number, + body: txt + }); + console.log(`closing ${prevIssue.html_url}`); + await github.rest.issues.update({ + ...context.repo, + issue_number: prevIssue.number, + body: txt, + state: 'closed' + }); + } + }; + + // Find status of check runs. + // We will find check runs for each commit and then filter for the periodic. + // Checks API only allows for ref and if we use main there could be edge cases where + // the check run happened on a SHA that is different from head. + const commits = await github.paginate(github.rest.repos.listCommits, { + ...context.repo + }); + + var foundCheck = false; + let periodicCheck = {}; + + for (const commit of commits) { + console.log( + `checking runs at ${commit.html_url}: ${commit.commit.message}` + ); + const checks = await github.rest.checks.listForRef({ + ...context.repo, + ref: commit.sha + }); + // find runs for this commit + for (const check of checks.data.check_runs) { + console.log(`found run ${check.name} for ${commit.html_url}`); + if (check.name.includes('periodic-int-trigger')) { + foundCheck = true; + periodicCheck = check; + break; + } + } + + if (foundCheck) { + if ( + periodicCheck.status === 'completed' && + periodicCheck.conclusion === 'success' + ) { + updateAndCloseIssues( + `[Passing periodic](${periodicCheck.html_url}) at ${commit.html_url}. Closing this issue.` + ); + } else if (periodicCheck.status === 'in_progress') { + console.log( + `Check is pending ${periodicCheck.html_url} for ${commit.html_url}. 
Retry again later.` + ); + } + // error case + else { + createOrCommentIssue( + 'Failing periodic', + `[Failing periodic](${periodicCheck.html_url}) at ${commit.html_url}.` + ); + } + // exit early as check was found + return; + } + } + + // no periodic-int-trigger checks found across all commits, report it + createOrCommentIssue( + 'Missing periodic', + `Periodic test has not run in the past 24hrs. Last checked from ${ + commits[0].html_url + } to ${commits[commits.length - 1].html_url}.` + ); diff --git a/infra/terraform/modules/workflow_files/variables.tf b/infra/terraform/modules/workflow_files/variables.tf new file mode 100644 index 00000000000..9ff772e5729 --- /dev/null +++ b/infra/terraform/modules/workflow_files/variables.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/****************************************** + Required variables +*******************************************/ + +variable "repos_map" { + description = "Map of Repos" + type = map(object({ + name = string + lint_env = optional(map(string)) + disable_lint_yaml = optional(bool) + enable_periodic = optional(bool) + })) +} + +variable "repo_list" { + description = "List of Repos" + type = map(any) +} diff --git a/infra/terraform/modules/workflow_files/versions.tf b/infra/terraform/modules/workflow_files/versions.tf new file mode 100644 index 00000000000..a18e05b5c55 --- /dev/null +++ b/infra/terraform/modules/workflow_files/versions.tf @@ -0,0 +1,24 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +terraform { + required_providers { + github = { + source = "integrations/github" + version = "~> 6.0" + } + } +} diff --git a/infra/terraform/terraform_backend/phoogle.tf b/infra/terraform/terraform_backend/phoogle.tf deleted file mode 100644 index c602bedbc92..00000000000 --- a/infra/terraform/terraform_backend/phoogle.tf +++ /dev/null @@ -1,18 +0,0 @@ -# Legacy Terraform state for deleted Phoogle projects -module "phoogle-backend" { - source = "terraform-google-modules/cloud-storage/google//modules/simple_bucket" - version = "~> 1.3" - - name = "cloud-foundation-cicd-tfstate" - project_id = module.variables.project_id - location = "US" -} - -module "phoogle-seed" { - source = "terraform-google-modules/cloud-storage/google//modules/simple_bucket" - version = "~> 1.3" - - name = "cloud-foundation-cicd-seed-projects-tfstate" - project_id = module.variables.project_id - location = "US" -} diff --git a/infra/terraform/terraform_backend/terraform.tfstate b/infra/terraform/terraform_backend/terraform.tfstate deleted file mode 100644 index 160143af28c..00000000000 --- a/infra/terraform/terraform_backend/terraform.tfstate +++ /dev/null @@ -1,103 +0,0 @@ -{ - "version": 4, - "terraform_version": "0.12.20", - "serial": 6, - "lineage": "39c3f0b9-0ef6-2c65-4541-f514715e1d01", - "outputs": {}, - "resources": [ - { - "module": "module.phoogle-seed", - "mode": "managed", - "type": "google_storage_bucket", - "name": "bucket", - "provider": "provider.google", - "instances": [ - { - "schema_version": 0, - "attributes": { - "bucket_policy_only": true, - "cors": [], - "default_event_based_hold": false, - "encryption": [], - "force_destroy": false, - "id": "cloud-foundation-cicd-seed-projects-tfstate", - "labels": {}, - "lifecycle_rule": [], - "location": "US", - "logging": [], - "name": "cloud-foundation-cicd-seed-projects-tfstate", - "project": "cloud-foundation-cicd", - "requester_pays": false, - "retention_policy": [], - "self_link": 
"https://www.googleapis.com/storage/v1/b/cloud-foundation-cicd-seed-projects-tfstate", - "storage_class": "STANDARD", - "url": "gs://cloud-foundation-cicd-seed-projects-tfstate", - "versioning": [ - { - "enabled": true - } - ], - "website": [] - }, - "private": "bnVsbA==" - } - ] - }, - { - "module": "module.phoogle-backend", - "mode": "managed", - "type": "google_storage_bucket", - "name": "bucket", - "provider": "provider.google", - "instances": [ - { - "schema_version": 0, - "attributes": { - "bucket_policy_only": true, - "cors": [], - "default_event_based_hold": false, - "encryption": [], - "force_destroy": false, - "id": "cloud-foundation-cicd-tfstate", - "labels": {}, - "lifecycle_rule": [], - "location": "US", - "logging": [], - "name": "cloud-foundation-cicd-tfstate", - "project": "cloud-foundation-cicd", - "requester_pays": false, - "retention_policy": [], - "self_link": "https://www.googleapis.com/storage/v1/b/cloud-foundation-cicd-tfstate", - "storage_class": "STANDARD", - "url": "gs://cloud-foundation-cicd-tfstate", - "versioning": [ - { - "enabled": true - } - ], - "website": [] - }, - "private": "bnVsbA==" - } - ] - }, - { - "module": "module.phoogle-seed", - "mode": "managed", - "type": "google_storage_bucket_iam_member", - "name": "members", - "each": "map", - "provider": "provider.google", - "instances": [] - }, - { - "module": "module.phoogle-backend", - "mode": "managed", - "type": "google_storage_bucket_iam_member", - "name": "members", - "each": "map", - "provider": "provider.google", - "instances": [] - } - ] -} diff --git a/infra/terraform/terraform_backend/variables.tf b/infra/terraform/terraform_backend/variables.tf deleted file mode 100644 index 0c0f9ce0c90..00000000000 --- a/infra/terraform/terraform_backend/variables.tf +++ /dev/null @@ -1 +0,0 @@ -module "variables" { source = "../modules/variables" } diff --git a/infra/terraform/test-org/ci-comment-build-trigger-function/backend.tf 
b/infra/terraform/test-org/ci-comment-build-trigger-function/backend.tf deleted file mode 100644 index a7775dd4270..00000000000 --- a/infra/terraform/test-org/ci-comment-build-trigger-function/backend.tf +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright 2019 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -terraform { - backend "gcs" { - bucket = "cft-infra-test-tfstate" - prefix = "state/ci-pr-comment-function" - } -} diff --git a/infra/terraform/test-org/ci-comment-build-trigger-function/function.tf b/infra/terraform/test-org/ci-comment-build-trigger-function/function.tf deleted file mode 100644 index 88136fb1b01..00000000000 --- a/infra/terraform/test-org/ci-comment-build-trigger-function/function.tf +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -locals { - project_id = "cloud-foundation-cicd" -} - -module "gcf_service_account" { - source = "terraform-google-modules/service-accounts/google" - version = "~> 3.0" - project_id = local.project_id - names = ["pr-comment-cf-sa"] - project_roles = [ - "${local.project_id}=>roles/cloudbuild.builds.editor" - ] -} - -resource "random_id" "suffix" { - byte_length = 4 -} - -module "pr_comment_build_function" { - source = "terraform-google-modules/event-function/google" - version = "~> 1.0" - name = "pr-comment-downstream-builder-${random_id.suffix.hex}" - project_id = local.project_id - region = "us-central1" - description = "Launches a downstream build that comments on a PR." - entry_point = "main" - runtime = "python37" - source_directory = "${path.module}/function_source" - service_account_email = module.gcf_service_account.email - bucket_force_destroy = true - - environment_variables = { - CLOUDBUILD_PROJECT = local.project_id - } - - event_trigger = { - event_type = "google.pubsub.topic.publish" - resource = "projects/${local.project_id}/topics/cloud-builds" - } -} diff --git a/infra/terraform/test-org/ci-comment-build-trigger-function/function_source/main.py b/infra/terraform/test-org/ci-comment-build-trigger-function/function_source/main.py deleted file mode 100644 index 81cf99b4615..00000000000 --- a/infra/terraform/test-org/ci-comment-build-trigger-function/function_source/main.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 -import sys -import os -import json -import logging -import requests - -from google.cloud.devtools.cloudbuild_v1 import CloudBuildClient as cloudbuild -from google.cloud.devtools.cloudbuild_v1.types import BuildStep, Build, BuildOptions -from google.protobuf import duration_pb2 as duration - -CFT_TOOLS_DEFAULT_IMAGE = 'gcr.io/cloud-foundation-cicd/cft/developer-tools' -CFT_TOOLS_DEFAULT_IMAGE_VERSION = '0.12' -DISABLED_MODULES = ["terraform-example-foundation"] - - -def main(event, context): - """ Triggers a new downstream build based on a PubSub message originating from a parent cloudbuild """ - # if cloud build project is not set, exit - if not os.getenv('CLOUDBUILD_PROJECT'): - logging.warn('Cloud Build project not set') - sys.exit(1) - # if no data in PubSub event, log and exit - if 'data' not in event: - logging.info('Unable to find data in PubSub event') - sys.exit(1) - # decode data in PubSub event - data = json.loads(base64.b64decode(event['data']).decode('utf-8')) - # if the parent build originated from CF, ignore - if data['substitutions'].get('_IS_TRIGGERED_BY_CF', False): - logging.warn('Triggered by CF, Ignoring') - return - logging.info('Parent build not triggered by CF') - # if parent build is not a lint build, ignore - if 'lint' not in data['tags']: - logging.warn('Parent build is not a lint build') - return - # if parent build has not started, or is in any other state, ignore - if data['status'] != 'WORKING': - logging.warn('Parent build is not in WORKING status') - return - logging.info('Parent build is in WORKING status') - # if repo ref for the parent build has disabled PR bot, ignore - if data['substitutions']['REPO_NAME'] in DISABLED_MODULES: - logging.warn('Comment bot is disabled for this repo') - return - if data['substitutions'].get('_DOCKER_TAG_VERSION_DEVELOPER_TOOLS', False): - logging.info( - f'Found 
_DOCKER_TAG_VERSION_DEVELOPER_TOOLS. Setting tools image version to {data["substitutions"]["_DOCKER_TAG_VERSION_DEVELOPER_TOOLS"]}' - ) - CFT_TOOLS_DEFAULT_IMAGE_VERSION = data['substitutions'][ - '_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - ] - # Cloud Build seems to have a bug where if a build is re run through Github UI, it will not set _PR_NUMBER or _HEAD_REPO_URL - # workaround using the GH API to infer PR number and _HEAD_REPO_URL - PR_NUMBER = data['substitutions'].get('_PR_NUMBER', False) - _HEAD_REPO_URL = data['substitutions'].get('_HEAD_REPO_URL', False) - # default clone repo step - get_repo_args = [ - '-c', - 'git clone $$REPO_URL . && git checkout $$COMMIT_SHA && git status', - ] - if not (PR_NUMBER or _HEAD_REPO_URL): - logging.warn('Unable to infer PR number via Cloud Build. Trying via GH API') - # get list of github PRs that have this SHA - response = requests.get( - f'https://api.github.com/search/issues?q={data["substitutions"]["COMMIT_SHA"]}' - ) - response.raise_for_status() - response_obj = response.json() - # if more than one PR, ignore - if response_obj['total_count'] != 1: - logging.info(f'Multiple associated PRs found. Exiting...') - return - # if only one PR, its safe to assume that is associated with parent build's PR - logging.info(f'One associated PR found: {response_obj["items"][0]["number"]}') - PR_NUMBER = response_obj['items'][0]['number'] - # get target repo URL - pr_url = response_obj['items'][0]['html_url'] - _HEAD_REPO_URL = pr_url[: pr_url.find('/pull')] - # fetch PR at head using PR number - get_repo_args = [ - '-c', - 'git clone $$REPO_URL . 
&& git fetch origin pull/$$_PR_NUMBER/head:$$_PR_NUMBER && git checkout $$_PR_NUMBER && git show --name-only', - ] - - # prepare env vars - env = [ - f'_PR_NUMBER={PR_NUMBER}', - f'REPO_NAME={data["substitutions"]["REPO_NAME"]}', - f'REPO_URL={_HEAD_REPO_URL}', - f'COMMIT_SHA={data["substitutions"]["COMMIT_SHA"]}', - ] - get_repo_step = BuildStep( - name='gcr.io/cloud-builders/git', - env=env, - args=get_repo_args, - id='get_repo', - entrypoint='bash', - ) - # lint comment step - lint_args = [ - '-c', - 'source /usr/local/bin/task_helper_functions.sh && printenv && post_lint_status_pr_comment', - ] - lint_step = BuildStep( - name=f'{CFT_TOOLS_DEFAULT_IMAGE}:{CFT_TOOLS_DEFAULT_IMAGE_VERSION}', - env=env, - args=lint_args, - id='lint_comment', - entrypoint='/bin/bash', - ) - # substitutions - sub = { - '_IS_TRIGGERED_BY_CF': '1', - } - # create and trigger build - build = Build( - steps=[get_repo_step, lint_step], - options=BuildOptions(substitution_option='ALLOW_LOOSE'), - substitutions=sub, - timeout=duration.Duration(seconds=1200), - ) - response = cloudbuild().create_build(os.getenv('CLOUDBUILD_PROJECT'), build) - logging.info(response) diff --git a/infra/terraform/test-org/ci-comment-build-trigger-function/function_source/requirements.txt b/infra/terraform/test-org/ci-comment-build-trigger-function/function_source/requirements.txt deleted file mode 100644 index b072df4254e..00000000000 --- a/infra/terraform/test-org/ci-comment-build-trigger-function/function_source/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ - google-cloud-build==1.0.0 - protobuf3==0.2.1 diff --git a/infra/terraform/test-org/ci-comment-build-trigger-function/versions.tf b/infra/terraform/test-org/ci-comment-build-trigger-function/versions.tf deleted file mode 100644 index 141c17896f5..00000000000 --- a/infra/terraform/test-org/ci-comment-build-trigger-function/versions.tf +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 
(the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -provider "google" { - version = "~> 3.38.0" -} diff --git a/infra/terraform/test-org/ci-foundation/cloudbuild.yaml b/infra/terraform/test-org/ci-foundation/cloudbuild.yaml index badfdc19e3a..d87dc18c74f 100644 --- a/infra/terraform/test-org/ci-foundation/cloudbuild.yaml +++ b/infra/terraform/test-org/ci-foundation/cloudbuild.yaml @@ -20,7 +20,7 @@ steps: args: ['reconcile.sh'] substitutions: _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' - _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '1.1' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '1.18' options: env: - 'FOUNDATION_CICD_PROJECT_ID=$_FOUNDATION_CICD_PROJECT_ID' diff --git a/infra/terraform/test-org/ci-foundation/reconcile.sh b/infra/terraform/test-org/ci-foundation/reconcile.sh index b3e98b0a261..906239bcbf4 100755 --- a/infra/terraform/test-org/ci-foundation/reconcile.sh +++ b/infra/terraform/test-org/ci-foundation/reconcile.sh @@ -16,17 +16,17 @@ set -e echo "Reconciling gcp-org in production" -BUILD_ID=$(gcloud beta builds triggers run gcp-org---terraform-apply --branch=production --project ${FOUNDATION_CICD_PROJECT_ID} --format="value(metadata.build.id)") -gcloud beta builds log $BUILD_ID --project ${FOUNDATION_CICD_PROJECT_ID} --stream +BUILD_ID=$(gcloud beta builds triggers run gcp-org---terraform-apply --branch=production --project "${FOUNDATION_CICD_PROJECT_ID}" --format="value(metadata.build.id)") +gcloud beta builds log "$BUILD_ID" --project "${FOUNDATION_CICD_PROJECT_ID}" --stream 
repos=("gcp-environments" "gcp-networks" "gcp-projects") envs=("development" "non-production" "production") -for repo in ${repos[@]}; do +for repo in "${repos[@]}"; do TRIGGER_NAME="${repo}---terraform-apply" - for env in ${envs[@]}; do + for env in "${envs[@]}"; do echo "Reconciling ${repo} in ${env}" - BUILD_ID=$(gcloud beta builds triggers run ${TRIGGER_NAME} --branch=${env} --project ${FOUNDATION_CICD_PROJECT_ID} --format="value(metadata.build.id)") - gcloud beta builds log $BUILD_ID --project ${FOUNDATION_CICD_PROJECT_ID} --stream + BUILD_ID=$(gcloud beta builds triggers run "${TRIGGER_NAME}" --branch="${env}" --project "${FOUNDATION_CICD_PROJECT_ID}" --format="value(metadata.build.id)") + gcloud beta builds log "$BUILD_ID" --project "${FOUNDATION_CICD_PROJECT_ID}" --stream done done diff --git a/infra/terraform/test-org/ci-project/.terraform.lock.hcl b/infra/terraform/test-org/ci-project/.terraform.lock.hcl new file mode 100644 index 00000000000..6a624ce020f --- /dev/null +++ b/infra/terraform/test-org/ci-project/.terraform.lock.hcl @@ -0,0 +1,65 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/google" { + version = "6.6.0" + constraints = ">= 3.53.0, >= 3.64.0, >= 5.26.0, < 7.0.0" + hashes = [ + "h1:mllWOZFO8u2kD2kRTdDDAa8Jt+vb8Uxhf6C0lwLxoz8=", + "zh:0c181f9b9f0ab81731e5c4c2d20b6d342244506687437dad94e279ef2a588f68", + "zh:12a4c333fc0ba670e87f09eb574e4b7da90381f9929ef7c866048b6841cc8a6a", + "zh:15c277c2052df89429051350df4bccabe4cf46068433d4d8c673820d9756fc00", + "zh:35d1663c81b81cd98d768fa7b80874b48c51b27c036a3c598a597f653374d3c8", + "zh:56b268389758d544722a342da4174c486a40ffa2a49b45a06111fe31c6c9c867", + "zh:abd3ea8c3a62928ba09ba7eb42b52f53e682bd65e92d573f1739596b5a9a67b1", + "zh:be55a328d61d9db58690db74ed43614111e1105e5e52cee15acaa062df4e233e", + "zh:ce2317ce9fd02cf14323f9e061c43a415b4ae9b3f96046460d0e6b6529a5aa6c", + "zh:d54a6d8e031c824f1de21b93c3e01ed7fec134b4ae55223d08868c6168c98e47", + "zh:d8c6e33b5467c6eb5a970adb251c4c8194af12db5388cff9d4b250294eae4daa", + "zh:f49e4cc9c0b55b3bec7da64dd698298345634a5df372228ee12aa45e57982f64", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/google-beta" { + version = "6.6.0" + constraints = ">= 3.64.0, >= 5.26.0, < 7.0.0" + hashes = [ + "h1:B4Wrkju7TTLWlCIDh+Vh4knFQS3wmFm0NHICGjCNO3k=", + "zh:1bf8f840a9a4ac1e120a6155225a0defbfa7f07b19c9bb37b45f95006b020ccf", + "zh:39077f037e611bdbd6af42e51b2881ea03d62ad55f21b42f90dc09e2cf812753", + "zh:64313dd2158749e3a4f2759edb896a9efa2c2afc59feb38d3af57e31eaa64480", + "zh:6bec8b21a20f50d81ca2e633cdaf1144bb8615a1dedf50e87c86f4eda3467b05", + "zh:74566c568410997fe966ef44130d19d640dbb427ffec3de93f0fd2affeb6fd8f", + "zh:8fe1c42d3229d0fe64961b7fa480689408eff3e5be62eb108d6aa9d36a10a769", + "zh:9593b59efd271623f45d133164eae16676130439727a625c10d3b929d2f28671", + "zh:a72c71431523d1f0d0d8baf7141ff16aa5938c0edf27e05dc4d1dc3455a50d01", + "zh:cbc96a215575d94601ec315a2db8802b521e904aaecf2602bce0110786cfa81f", + 
"zh:e71c3e06e861d5c9d1782b0bbaef93b5b9defa926304f90ddd22bc9b69ee14bd", + "zh:f559eefcc67d771ce0157e7ec021c1025b8af2a8c7ca89e1f5ac812e16bf9760", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/integrations/github" { + version = "6.3.1" + constraints = "~> 6.0" + hashes = [ + "h1:kNCbU7jr9j09hqWwyXGFDN95Un28gWO2kY2yImv1MDY=", + "zh:25ae1cb97ec528e6b7e9330489f4a33acc0fa80b909c113a8445656bc524c5b9", + "zh:3e1f6300dc10e52a54f13352770ed79f25ff4ba9ac49b776c52a655a3488a20b", + "zh:4aaf2877ec22e63358d7c9cd48c7d7947d1a1dc4d03231f0af193d8975d5918a", + "zh:4b904a81fac12a2a7606c8d811cb9c4e13581adcaaa19e503a067ac95c515925", + "zh:54fe7e0dca04e698631a5b86bdd43ef09a31375e68f8f89970b4315cd5fc6312", + "zh:6b14f92cf62784eaf20f43ef58ce966735f30d43deeab077943bd410c0d8b8b2", + "zh:86c49a1c11c024b26b6750c446f104922a3fe8464d3706a5fb9a4a05c6ca0b0a", + "zh:8939fb6332c4a58c4e90245eb9f0110987ccafff06b45a7ed513f2759a2abe6a", + "zh:8b4068a78c1f357325d1151facdb1aff506b9cd79d2bab21a55651255a130e2f", + "zh:ae22f5e52f534f19811d7f9480b4eb442f12ff16367b3893abb4e449b029ff6b", + "zh:afae9cfd9d49002ddfea552aa4844074b9974bd56ff2c2458f2297fe0df56a5b", + "zh:bc7a434408eb16a4fbceec0bd86b108a491408b727071402ad572cdb1afa2eb7", + "zh:c8e4728ea2d2c6e3d2c1bc5e7d92ed1121c02bab687702ec2748e3a6a0844150", + "zh:f6314b2cff0c0a07a216501cda51b35e6a4c66a2418c7c9966ccfe701e01b6b0", + "zh:fbd1fee2c9df3aa19cf8851ce134dea6e45ea01cb85695c1726670c285797e25", + ] +} diff --git a/infra/terraform/test-org/ci-project/cleaner.tf b/infra/terraform/test-org/ci-project/cleaner.tf index 5e096c6e172..9998e19cc55 100644 --- a/infra/terraform/test-org/ci-project/cleaner.tf +++ b/infra/terraform/test-org/ci-project/cleaner.tf @@ -49,16 +49,17 @@ resource "google_project_iam_custom_role" "create_build_role" { } resource "google_project_iam_member" "project" { - role = google_project_iam_custom_role.create_build_role.id - member = 
"serviceAccount:${google_service_account.service_account.email}" + role = google_project_iam_custom_role.create_build_role.id + member = "serviceAccount:${google_service_account.service_account.email}" + project = local.project_id } resource "google_cloud_scheduler_job" "job" { name = "trigger-test-org-iam-reset-build" description = "Trigger reset test org IAM build" region = "us-central1" - # run every week at 13:00 on Saturday - schedule = "0 13 * * 6" + # run every day at 3:00 + schedule = "0 3 * * *" http_target { http_method = "POST" diff --git a/infra/terraform/test-org/ci-project/gar.tf b/infra/terraform/test-org/ci-project/gar.tf new file mode 100644 index 00000000000..7b1d0ab56e3 --- /dev/null +++ b/infra/terraform/test-org/ci-project/gar.tf @@ -0,0 +1,19 @@ +module "artifact_registry" { + source = "GoogleCloudPlatform/artifact-registry/google" + version = "~> 0.3" + + location = "us" + project_id = local.project_id + format = "DOCKER" + repository_id = "gcr.io" + + members = { + readers = [ + "allUsers", + ] + + writers = [ + "serviceAccount:${module.service_accounts.service_account.email}", + ] + } +} diff --git a/infra/terraform/test-org/ci-project/github.tf b/infra/terraform/test-org/ci-project/github.tf index 271262eb9ec..463cce8eef0 100644 --- a/infra/terraform/test-org/ci-project/github.tf +++ b/infra/terraform/test-org/ci-project/github.tf @@ -20,8 +20,14 @@ resource "github_actions_secret" "infra_secret_gcr_project" { plaintext_value = local.project_id } -resource "github_actions_secret" "infra_secret_gcr_key" { +resource "github_actions_secret" "wif_provider" { repository = local.gh_repos.infra - secret_name = "GCP_SA_KEY" - plaintext_value = module.service_accounts.key + secret_name = "GCP_WIF_PROVIDER" + plaintext_value = module.oidc.provider_name +} + +resource "github_actions_secret" "wif_sa" { + repository = local.gh_repos.infra + secret_name = "GCP_WIF_SA_EMAIL" + plaintext_value = module.service_accounts.service_account.email } diff --git 
a/infra/terraform/test-org/ci-project/image_purger.tf b/infra/terraform/test-org/ci-project/image_purger.tf new file mode 100644 index 00000000000..c39e9149833 --- /dev/null +++ b/infra/terraform/test-org/ci-project/image_purger.tf @@ -0,0 +1,47 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +resource "google_cloudbuild_trigger" "image_purge_trigger" { + description = "Purge CFT images without tags" + github { + owner = local.gh_orgs.infra + name = local.gh_repos.infra + # this will be invoked via cloud scheduler, hence using a regex that will not match any branch + push { + branch = ".^" + } + } + + filename = "infra/terraform/test-org/image-cleanup/cloudbuild.yaml" +} + +resource "google_cloud_scheduler_job" "image_purge_job" { + name = "trigger-purge-cft-image-build" + description = "Trigger Purge CFT images without tags build" + region = "us-central1" + # run every day at 3:00 + schedule = "0 3 * * *" + + http_target { + http_method = "POST" + uri = "https://cloudbuild.googleapis.com/v1/projects/${local.project_id}/triggers/${google_cloudbuild_trigger.image_purge_trigger.trigger_id}:run" + body = base64encode("{\"branchName\": \"master\"}") + oauth_token { + service_account_email = google_service_account.service_account.email + } + } + depends_on = [google_project_iam_member.project] +} diff --git a/infra/terraform/test-org/ci-project/main.tf b/infra/terraform/test-org/ci-project/main.tf index 
e8dd7dcf295..ba74f0ba317 100644 --- a/infra/terraform/test-org/ci-project/main.tf +++ b/infra/terraform/test-org/ci-project/main.tf @@ -1,14 +1,27 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + provider "github" { - version = "~> 2.2" - organization = local.gh_orgs.infra + owner = local.gh_orgs.infra } provider "google" { - version = "~> 3.39" project = local.project_id } provider "google-beta" { - version = "~> 3.39" project = local.project_id } diff --git a/infra/terraform/test-org/ci-project/outputs.tf b/infra/terraform/test-org/ci-project/outputs.tf index 264a34bf1e1..85ba995e5ee 100644 --- a/infra/terraform/test-org/ci-project/outputs.tf +++ b/infra/terraform/test-org/ci-project/outputs.tf @@ -13,4 +13,3 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - diff --git a/infra/terraform/test-org/ci-project/sa.tf b/infra/terraform/test-org/ci-project/sa.tf index fd53e82e417..93098c77101 100644 --- a/infra/terraform/test-org/ci-project/sa.tf +++ b/infra/terraform/test-org/ci-project/sa.tf @@ -16,7 +16,7 @@ module "service_accounts" { source = "terraform-google-modules/service-accounts/google" - version = "~> 2.0" + version = "~> 4.1" project_id = local.project_id @@ -25,3 +25,18 @@ module "service_accounts" { "${local.project_id}=>roles/storage.admin" ] } + +module "oidc" { + source = "terraform-google-modules/github-actions-runners/google//modules/gh-oidc" + version = "~> 4.0" + + project_id = local.project_id + pool_id = "cft-pool" + provider_id = "cft-gh-provider" + sa_mapping = { + cft-github-actions = { + sa_name = module.service_accounts.service_account.name + attribute = "attribute.repository/GoogleCloudPlatform/cloud-foundation-toolkit" + } + } +} diff --git a/infra/terraform/test-org/ci-project/versions.tf b/infra/terraform/test-org/ci-project/versions.tf new file mode 100644 index 00000000000..708a6cc1b31 --- /dev/null +++ b/infra/terraform/test-org/ci-project/versions.tf @@ -0,0 +1,31 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +terraform { + required_version = ">= 1.4.4" + required_providers { + github = { + source = "integrations/github" + version = "~> 6.0" + } + google = { + version = ">= 5.26, < 7" + } + google-beta = { + version = ">= 5.26, < 7" + } + } +} diff --git a/infra/terraform/test-org/ci-triggers/.terraform.lock.hcl b/infra/terraform/test-org/ci-triggers/.terraform.lock.hcl new file mode 100644 index 00000000000..f34e75026cd --- /dev/null +++ b/infra/terraform/test-org/ci-triggers/.terraform.lock.hcl @@ -0,0 +1,42 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/google" { + version = "5.43.1" + constraints = ">= 2.18.0, >= 4.46.0, < 6.0.0, < 7.0.0" + hashes = [ + "h1:kKqTi+6TS7GCMKFbwf0aTA3FwZtJTaAtagGEZAFbCEg=", + "zh:40b46223d3804c32b8e1e8e16ba057230a774d9f250896d8d410272ed5b14318", + "zh:95a2f2b029b9f9dc0311f0a81d1d2ef41b0a19b4cfb50393c2ece23f53fcd785", + "zh:a8263fbc090c80c8c21b4278f7fedaeb792b5f6061b9e5182ce8aa69fc828035", + "zh:ac900b372ac879691cdc0bf149baee3a11a37d4102e30a4931f6cfd960f5c852", + "zh:ae336bee99114a98327141ef6555aa5953a87ce216f0f105c88a88947d477d2c", + "zh:babe5974f850610746111264e897d9d7ac1c44e17e81ffef53d28059e3019453", + "zh:c7f77ac8e07e488432d6b45688e2a45f1bded82d9914666136b7e50a74ab6fd9", + "zh:d5dd46a0bbf783141d0592043720c61ea055a359986f50bdbfd6fc5d03aae68c", + "zh:d805f8abc5e3d547bc9964ccc7d87e687add92397b2e3fedb40afcda7d93dca8", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:f678f3ee9d96b373d582d8012dcceca446f4891b9d1593447023b04137a6529e", + "zh:f6a75d48a0be31ee42b1db22b3ad9af2ead170bdd9f023ec8fb292b861b5fa3a", + ] +} + +provider "registry.terraform.io/hashicorp/google-beta" { + version = "6.1.0" + constraints = ">= 2.18.0, < 7.0.0" + hashes = [ + "h1:AEOTKPFbO8SKMOCBhzANkGST9yjGKO6OXjEScROE8Ro=", + "zh:3ea706aa701755ed212a13ebfc0332a587738add77f7433449ec0b0bde9ba6f3", + 
"zh:5025a74240983a7a55c1496b124da4193f23dec751dec203e1801439e6c232af", + "zh:535fe811841e7ce06050b66f4d32d5812df17cf942c15d3063c8d29197733938", + "zh:66e81f66c2bc4e2d325b537f9e9e51ea4ca4d1015b8c7ee8bdca74c6fc1f41b6", + "zh:71ebb3ba56666ba38424acebb5b149df10cb31f68e268e7be6ec66046c9e83b3", + "zh:8c798beb2516f726324289a06eb6956c88905c2a385e4cf18830c5210d9abc64", + "zh:9110f27d1c5cf2b797162c17ab75c47c856f8273fb66dbf26b3d2fe5b1a9b105", + "zh:a85f6ed96ade11563eaa3c8ab25e36c4a8abea8f3de2c59a2a318a110903d110", + "zh:aec1733cbe018418f62182b06f982ecaa32888398f93072cf5300523e9c6763d", + "zh:b58eb8b422eb6c576f1b597c1bcda618ef143d6acc9663730e9b4ce55e035814", + "zh:b5984d64c9bf7ac5e27b9fdbcc44fd206b320259007b5c7fc6e3c975b0762478", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} diff --git a/infra/terraform/test-org/ci-triggers/backend.tf b/infra/terraform/test-org/ci-triggers/backend.tf index 536613efd07..813fc324898 100644 --- a/infra/terraform/test-org/ci-triggers/backend.tf +++ b/infra/terraform/test-org/ci-triggers/backend.tf @@ -37,3 +37,10 @@ data "terraform_remote_state" "tf-validator" { } } +data "terraform_remote_state" "sfb-bootstrap" { + backend = "gcs" + config = { + bucket = "bkt-b-tfstate-1d93" + prefix = "terraform/bootstrap/state" + } +} diff --git a/infra/terraform/test-org/ci-triggers/locals.tf b/infra/terraform/test-org/ci-triggers/locals.tf index cb3db532b24..a62e2deac88 100644 --- a/infra/terraform/test-org/ci-triggers/locals.tf +++ b/infra/terraform/test-org/ci-triggers/locals.tf @@ -22,23 +22,61 @@ locals { "ci-shared", "ci-anthos-platform", "ci-example-foundation", - "ci-blueprints" + "ci-blueprints", + "ci-policy-blueprints", ] # custom mapping of the form name => repo_name used for overriding `terraform-google` prefix custom_repo_mapping = { - "cloud-foundation-training" = "cloud-foundation-training", - "example-foundation-app" = "terraform-example-foundation-app", - "anthos-samples" = "anthos-samples" + 
"cloud-foundation-training" = "cloud-foundation-training", + "example-foundation-app" = "terraform-example-foundation-app", + "anthos-samples" = "anthos-samples" + "docs-samples" = "terraform-docs-samples" + "dynamic-python-webapp" = "terraform-dynamic-python-webapp" + "dynamic-javascript-webapp" = "terraform-dynamic-javascript-webapp" + "deploy-java-multizone" = "terraform-example-deploy-java-multizone" + "ecommerce-microservices" = "terraform-ecommerce-microservices-on-gke" + "deploy-java-gke" = "terraform-example-deploy-java-gke" + "java-dynamic-point-of-sale" = "terraform-example-java-dynamic-point-of-sale" + "ml-image-annotation-gcf" = "terraform-ml-image-annotation-gcf" + "genai-doc-summarization" = "terraform-genai-doc-summarization" + "genai-knowledge-base" = "terraform-genai-knowledge-base" + "secured-notebook" = "notebooks-blueprint-security" + "sdw-onprem-ingest" = "terraform-google-secured-data-warehouse-onprem-ingest" + "pubsub-golang-app" = "terraform-pubsub-integration-golang" + "pubsub-java-app" = "terraform-pubsub-integration-java" + "genai-rag" = "terraform-genai-rag" + "cloud-client-api" = "terraform-cloud-client-api" + "dataanalytics-eventdriven" = "terraform-dataanalytics-eventdriven" + "kms-solutions" = "kms-solutions" } # example foundation has custom test modes example_foundation = { "terraform-example-foundation" = data.terraform_remote_state.org.outputs.ci_repos_folders["example-foundation"] } example_foundation_int_test_modes = ["default", "HubAndSpoke"] - repo_folder = { for key, value in data.terraform_remote_state.org.outputs.ci_repos_folders : contains(keys(local.custom_repo_mapping), key) ? 
local.custom_repo_mapping[key] : "terraform-google-${key}" => value if !contains(local.exclude_folders, value.folder_name) } - org_id = data.terraform_remote_state.org.outputs.org_id - billing_account = data.terraform_remote_state.org.outputs.billing_account - tf_validator_project_id = data.terraform_remote_state.tf-validator.outputs.project_id + repo_folder = { for key, value in data.terraform_remote_state.org.outputs.ci_repos_folders : contains(keys(local.custom_repo_mapping), key) ? local.custom_repo_mapping[key] : "terraform-google-${key}" => value if !contains(local.exclude_folders, value.folder_name) } + org_id = data.terraform_remote_state.org.outputs.org_id + billing_account = data.terraform_remote_state.org.outputs.billing_account + lr_billing_account = data.terraform_remote_state.org.outputs.lr_billing_account + tf_validator_project_id = data.terraform_remote_state.tf-validator.outputs.project_id + tf_validator_folder_id = trimprefix(data.terraform_remote_state.org.outputs.folders["ci-terraform-validator"], "folders/") + # tf validator "ancestry path" expects non-plural type names for historical reasons + tf_validator_ancestry = "organization/${local.org_id}/folder/${trimprefix(data.terraform_remote_state.org.outputs.folders["ci-projects"], "folders/")}/folder/${local.tf_validator_folder_id}" project_id = "cloud-foundation-cicd" forseti_ci_folder_id = "542927601143" billing_iam_test_account = "0151A3-65855E-5913CF" + # blueprints which can be layered on top of SFB + bp_on_sfb = [ + "terraform-google-cloud-run" + ] + # SFB deployment info + sfb_substs = { + _SFB_ORG_ID : "413973101099", + _SFB_SEED_PROJECT_ID : data.terraform_remote_state.sfb-bootstrap.outputs.seed_project_id, + _SFB_CLOUDBUILD_PROJECT_ID : data.terraform_remote_state.sfb-bootstrap.outputs.cloudbuild_project_id, + _SFB_TF_SA_NAME : data.terraform_remote_state.sfb-bootstrap.outputs.terraform_sa_name, + } + # vod test project id + vod_test_project_id = 
data.terraform_remote_state.org.outputs.ci_media_cdn_vod_project_id + # file logger opt-in + enable_file_log = { "terraform-docs-samples" : true } } diff --git a/infra/terraform/test-org/ci-triggers/logs.tf b/infra/terraform/test-org/ci-triggers/logs.tf new file mode 100644 index 00000000000..43a64acc18d --- /dev/null +++ b/infra/terraform/test-org/ci-triggers/logs.tf @@ -0,0 +1,29 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "filelogs_bucket" { + source = "terraform-google-modules/cloud-storage/google//modules/simple_bucket" + version = "~> 6.1" + + name = "${local.project_id}-filelogs" + project_id = local.project_id + location = "us-central1" + + iam_members = [{ + role = "roles/storage.objectAdmin" + member = "group:${data.terraform_remote_state.org.outputs.cft_ci_group}" + }] +} diff --git a/infra/terraform/test-org/ci-triggers/outputs.tf b/infra/terraform/test-org/ci-triggers/outputs.tf index 4a6a31d0bd7..46dd7f610fb 100644 --- a/infra/terraform/test-org/ci-triggers/outputs.tf +++ b/infra/terraform/test-org/ci-triggers/outputs.tf @@ -18,11 +18,10 @@ output "repo_folder" { value = { for k, v in local.repo_folder : k => v.folder_id } } - -output "lint_triggers" { - value = google_cloudbuild_trigger.lint_trigger.* +output "int_triggers" { + value = google_cloudbuild_trigger.int_trigger[*] } -output "int_triggers" { - value = google_cloudbuild_trigger.int_trigger.* +output "int_periodic_triggers" { + 
value = google_cloudbuild_trigger.periodic_int_trigger[*] } diff --git a/infra/terraform/test-org/ci-triggers/scheduler.tf b/infra/terraform/test-org/ci-triggers/scheduler.tf new file mode 100644 index 00000000000..ccc605ee0b2 --- /dev/null +++ b/infra/terraform/test-org/ci-triggers/scheduler.tf @@ -0,0 +1,52 @@ +/** + * Copyright 2023-2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + periodic_repos = toset([for item in data.terraform_remote_state.org.outputs.periodic_repos : contains(keys(local.custom_repo_mapping), item) ? 
local.custom_repo_mapping[item] : item]) +} + +resource "google_service_account" "periodic_sa" { + project = local.project_id + account_id = "periodic-test-trigger-sa" + display_name = "SA used by Cloud Scheduler to trigger periodics" +} + +resource "google_project_iam_member" "periodic_role" { + # custom role we created in ci-project/cleaner for scheduled cleaner builds + project = local.project_id + role = "projects/${local.project_id}/roles/CreateBuild" + member = "serviceAccount:${google_service_account.periodic_sa.email}" +} + +resource "google_cloud_scheduler_job" "job" { + for_each = local.periodic_repos + name = "periodic-${each.value}" + project = local.project_id + description = "Trigger periodic build for ${each.value}" + region = "us-central1" + # run every day at 3:00 + # todo(bharathkkb): likely need to stagger run times once number of repos increase + schedule = "0 3 * * *" + + http_target { + http_method = "POST" + uri = "https://cloudbuild.googleapis.com/v1/projects/${local.project_id}/triggers/${google_cloudbuild_trigger.periodic_int_trigger[each.value].trigger_id}:run" + body = base64encode("{\"branchName\": \"main\"}") + oauth_token { + service_account_email = google_service_account.periodic_sa.email + } + } +} diff --git a/infra/terraform/test-org/ci-triggers/secrets.tf b/infra/terraform/test-org/ci-triggers/secrets.tf new file mode 100644 index 00000000000..741608a83cc --- /dev/null +++ b/infra/terraform/test-org/ci-triggers/secrets.tf @@ -0,0 +1,63 @@ +/** + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +resource "google_secret_manager_secret" "tfe_token" { + project = local.project_id + secret_id = "tke-token" + + replication { + auto {} + } +} + +resource "google_secret_manager_secret_iam_member" "tfe_token_member" { + project = google_secret_manager_secret.tfe_token.project + secret_id = google_secret_manager_secret.tfe_token.secret_id + role = "roles/secretmanager.secretAccessor" + member = "group:${data.terraform_remote_state.org.outputs.cft_ci_group}" +} + +resource "google_secret_manager_secret" "im_github_pat" { + project = local.project_id + secret_id = "im_github_pat" + + replication { + auto {} + } +} + +resource "google_secret_manager_secret_iam_member" "im_github_pat_member" { + project = google_secret_manager_secret.im_github_pat.project + secret_id = google_secret_manager_secret.im_github_pat.secret_id + role = "roles/secretmanager.secretAccessor" + member = "group:${data.terraform_remote_state.org.outputs.cft_ci_group}" +} + +resource "google_secret_manager_secret" "im_gitlab_pat" { + project = local.project_id + secret_id = "im_gitlab_pat" + + replication { + auto {} + } +} + +resource "google_secret_manager_secret_iam_member" "im_gitlab_pat_member" { + project = google_secret_manager_secret.im_gitlab_pat.project + secret_id = google_secret_manager_secret.im_gitlab_pat.secret_id + role = "roles/secretmanager.secretAccessor" + member = "group:${data.terraform_remote_state.org.outputs.cft_ci_group}" +} diff --git a/infra/terraform/test-org/ci-triggers/triggers.tf b/infra/terraform/test-org/ci-triggers/triggers.tf index 5bc42202b5d..4f84191b53c 100644 --- a/infra/terraform/test-org/ci-triggers/triggers.tf +++ b/infra/terraform/test-org/ci-triggers/triggers.tf @@ -14,143 +14,404 @@ * limitations under the License. 
*/ -resource "google_cloudbuild_trigger" "lint_trigger" { +resource "google_cloudbuild_trigger" "int_trigger" { provider = google-beta project = local.project_id - description = "Lint tests on pull request for ${each.key}" - for_each = merge(local.repo_folder, local.example_foundation) + name = "${substr(each.key, 0, 50)}-int-trigger" + description = "Integration tests on pull request for ${each.key}" + for_each = local.repo_folder github { owner = each.value.gh_org name = each.key pull_request { - branch = ".*" + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" } } + substitutions = merge( + { + _BILLING_ACCOUNT = local.billing_account + _FOLDER_ID = each.value.folder_id + _ORG_ID = local.org_id + _BILLING_IAM_TEST_ACCOUNT = each.key == "terraform-google-iam" ? local.billing_iam_test_account : null + _VOD_TEST_PROJECT_ID = each.key == "terraform-google-media-cdn-vod" ? local.vod_test_project_id : null + _FILE_LOGS_BUCKET = lookup(local.enable_file_log, each.key, false) ? module.filelogs_bucket.url : null + _LR_BILLING_ACCOUNT = local.lr_billing_account + _TFE_TOKEN_SECRET_ID = each.key == "terraform-google-tf-cloud-agents" ? google_secret_manager_secret.tfe_token.id : null + _IM_GITHUB_PAT_SECRET_ID = each.key == "terraform-google-bootstrap" ? google_secret_manager_secret.im_github_pat.id : null + _IM_GITLAB_PAT_SECRET_ID = each.key == "terraform-google-bootstrap" ? google_secret_manager_secret.im_gitlab_pat.id : null + }, + # add sfb substitutions + contains(local.bp_on_sfb, each.key) ? 
local.sfb_substs : {} + ) - filename = "build/lint.cloudbuild.yaml" + filename = "build/int.cloudbuild.yaml" + ignored_files = ["**/*.md", ".gitignore", ".github/**", "**/metadata.yaml", "**/metadata.display.yaml", "assets/**", "infra/assets/**"] } -resource "google_cloudbuild_trigger" "int_trigger" { +# pull_request triggers do not support run trigger, so we have a shadow periodic trigger +resource "google_cloudbuild_trigger" "periodic_int_trigger" { provider = google-beta project = local.project_id - description = "Integration tests on pull request for ${each.key}" - for_each = local.repo_folder + name = substr("${each.key}-periodic-int-trigger", 0, 64) + description = "Periodic integration tests on pull request for ${each.key}" + for_each = { for k, v in local.repo_folder : k => v if contains(local.periodic_repos, k) } github { owner = each.value.gh_org name = each.key + # this will be invoked via cloud scheduler, hence using a regex that will not match any branch + push { + branch = ".^" + } + } + substitutions = merge( + { + _BILLING_ACCOUNT = local.billing_account + _FOLDER_ID = each.value.folder_id + _ORG_ID = local.org_id + _BILLING_IAM_TEST_ACCOUNT = each.key == "terraform-google-iam" ? local.billing_iam_test_account : null + _VOD_TEST_PROJECT_ID = each.key == "terraform-google-media-cdn-vod" ? local.vod_test_project_id : null + _FILE_LOGS_BUCKET = lookup(local.enable_file_log, each.key, false) ? module.filelogs_bucket.url : null + _LR_BILLING_ACCOUNT = local.lr_billing_account + _PERIODIC = true + }, + # add sfb substitutions + contains(local.bp_on_sfb, each.key) ? 
local.sfb_substs : {} + ) + + filename = "build/int.cloudbuild.yaml" + ignored_files = ["**/*.md", ".gitignore", ".github/**", "**/metadata.yaml"] +} + + +resource "google_cloudbuild_trigger" "tf_validator_main_integration_tests" { + for_each = { + tf12 = "0.12.31" + tf13 = "0.13.7" + } + name = "tf-validator-main-integration-tests-${each.key}" + description = "Main/release branch integration tests for terraform-validator with terraform ${each.value}. Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id + github { + owner = "GoogleCloudPlatform" + name = "terraform-validator" + push { + branch = "^(main|release-.+)$" + } + } + substitutions = { + _TERRAFORM_VERSION = each.value + _TEST_PROJECT = local.tf_validator_project_id + _TEST_FOLDER = local.tf_validator_folder_id + _TEST_ANCESTRY = local.tf_validator_ancestry + _TEST_ORG = local.org_id + } + + filename = ".ci/cloudbuild-tests-integration.yaml" +} + +resource "google_cloudbuild_trigger" "tf_validator_pull_integration_tests" { + for_each = { + tf12 = "0.12.31" + tf13 = "0.13.7" + } + name = "tf-validator-pull-integration-tests-${each.key}" + description = "Pull request integration tests for terraform-validator with terraform ${each.value}. Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id + github { + owner = "GoogleCloudPlatform" + name = "terraform-validator" pull_request { - branch = ".*" + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" } } substitutions = { - _BILLING_ACCOUNT = local.billing_account - _FOLDER_ID = each.value.folder_id - _ORG_ID = local.org_id - _BILLING_IAM_TEST_ACCOUNT = each.key == "terraform-google-iam" ? 
local.billing_iam_test_account : null + _TERRAFORM_VERSION = each.value + _TEST_PROJECT = local.tf_validator_project_id + _TEST_FOLDER = local.tf_validator_folder_id + _TEST_ANCESTRY = local.tf_validator_ancestry + _TEST_ORG = local.org_id } - filename = "build/int.cloudbuild.yaml" - ignored_files = ["*.md", ".gitignore"] + filename = ".ci/cloudbuild-tests-integration.yaml" } -resource "google_cloudbuild_trigger" "tf_validator" { - provider = google-beta - project = local.project_id - description = "Pull request build for tf-validator" +resource "google_cloudbuild_trigger" "tf_validator_pull_unit_tests" { + name = "tf-validator-pull-unit-tests" + description = "Pull request unit tests for terraform-validator. Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id github { owner = "GoogleCloudPlatform" name = "terraform-validator" pull_request { - branch = ".*" + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" } } substitutions = { - _TEST_PROJECT = local.tf_validator_project_id + _TEST_PROJECT = local.tf_validator_project_id + _TEST_FOLDER = local.tf_validator_folder_id + _TEST_ANCESTRY = local.tf_validator_ancestry + _TEST_ORG = local.org_id } - filename = "build/int.cloudbuild.yaml" + filename = ".ci/cloudbuild-tests-unit.yaml" } -resource "google_cloudbuild_trigger" "forseti_lint" { - provider = google-beta - project = local.project_id - description = "Lint tests on pull request for forseti" +resource "google_cloudbuild_trigger" "tf_validator_main_unit_tests" { + name = "tf-validator-main-unit-tests" + description = "Main/release branch unit tests for terraform-validator. 
Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id github { - owner = "forseti-security" - name = "terraform-google-forseti" + owner = "GoogleCloudPlatform" + name = "terraform-validator" + push { + branch = "^(main|release-.+)$" + } + } + substitutions = { + _TEST_PROJECT = local.tf_validator_project_id + _TEST_FOLDER = local.tf_validator_folder_id + _TEST_ANCESTRY = local.tf_validator_ancestry + _TEST_ORG = local.org_id + } + + filename = ".ci/cloudbuild-tests-unit.yaml" +} + +resource "google_cloudbuild_trigger" "tf_validator_pull_license_check" { + name = "tf-validator-pull-license-check" + description = "Pull request license check for terraform-validator. Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id + github { + owner = "GoogleCloudPlatform" + name = "terraform-validator" pull_request { - branch = ".*" + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" } } - filename = "build/lint.cloudbuild.yaml" + filename = ".ci/cloudbuild-tests-go-licenses.yaml" } -resource "google_cloudbuild_trigger" "forseti_int" { - provider = google-beta - project = local.project_id - description = "Integration tests on pull request for forseti" +resource "google_cloudbuild_trigger" "tf_validator_main_license_check" { + name = "tf-validator-main-license-check" + description = "Main/release branch license check for terraform-validator. 
Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id github { - owner = "forseti-security" - name = "terraform-google-forseti" + owner = "GoogleCloudPlatform" + name = "terraform-validator" + push { + branch = "^(main|release-.+)$" + } + } + + filename = ".ci/cloudbuild-tests-go-licenses.yaml" +} + +resource "google_cloudbuild_trigger" "tgc_main_integration_tests" { + for_each = { + tf12 = "0.12.31" + tf13 = "0.13.7" + } + name = "tgc-main-integration-tests-${each.key}" + description = "Main/release branch integration tests for terraform-google-conversion with terraform ${each.value}. Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id + github { + owner = "GoogleCloudPlatform" + name = "terraform-google-conversion" + push { + branch = "^(main|release-.+)$" + } + } + substitutions = { + _TERRAFORM_VERSION = each.value + _TEST_PROJECT = local.tf_validator_project_id + _TEST_FOLDER = local.tf_validator_folder_id + _TEST_ANCESTRY = local.tf_validator_ancestry + _TEST_ORG = local.org_id + } + + filename = ".ci/cloudbuild-tests-integration.yaml" +} + +resource "google_cloudbuild_trigger" "tgc_pull_integration_tests" { + for_each = { + tf12 = "0.12.31" + tf13 = "0.13.7" + } + name = "tgc-pull-integration-tests-${each.key}" + description = "Pull request integration tests for terraform-google-conversion with terraform ${each.value}. 
Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id + github { + owner = "GoogleCloudPlatform" + name = "terraform-google-conversion" pull_request { - branch = ".*" + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" } } substitutions = { - _BILLING_ACCOUNT = local.billing_account - _FOLDER_ID = local.forseti_ci_folder_id - _ORG_ID = local.org_id + _TERRAFORM_VERSION = each.value + _TEST_PROJECT = local.tf_validator_project_id + _TEST_FOLDER = local.tf_validator_folder_id + _TEST_ANCESTRY = local.tf_validator_ancestry + _TEST_ORG = local.org_id } - filename = "build/int.cloudbuild.yaml" + filename = ".ci/cloudbuild-tests-integration.yaml" +} + +resource "google_cloudbuild_trigger" "tgc_pull_unit_tests" { + name = "tgc-pull-unit-tests" + description = "Pull request unit tests for terraform-google-conversion. Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id + github { + owner = "GoogleCloudPlatform" + name = "terraform-google-conversion" + pull_request { + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" + } + } + substitutions = { + _TEST_PROJECT = local.tf_validator_project_id + _TEST_FOLDER = local.tf_validator_folder_id + _TEST_ANCESTRY = local.tf_validator_ancestry + _TEST_ORG = local.org_id + } + + filename = ".ci/cloudbuild-tests-unit.yaml" +} + +resource "google_cloudbuild_trigger" "tgc_main_unit_tests" { + name = "tgc-main-unit-tests" + description = "Main/release branch unit tests for terraform-google-conversion. 
Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id + github { + owner = "GoogleCloudPlatform" + name = "terraform-google-conversion" + push { + branch = "^(main|release-.+)$" + } + } + substitutions = { + _TEST_PROJECT = local.tf_validator_project_id + _TEST_FOLDER = local.tf_validator_folder_id + _TEST_ANCESTRY = local.tf_validator_ancestry + _TEST_ORG = local.org_id + } + + filename = ".ci/cloudbuild-tests-unit.yaml" +} + +resource "google_cloudbuild_trigger" "tgc_pull_license_check" { + name = "tgc-pull-license-check" + description = "Pull request license check for terraform-google-conversion. Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id + github { + owner = "GoogleCloudPlatform" + name = "terraform-google-conversion" + pull_request { + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" + } + } + + filename = ".ci/cloudbuild-tests-go-licenses.yaml" } -resource "google_cloudbuild_trigger" "tf_py_test_helper_lint" { +resource "google_cloudbuild_trigger" "tgc_main_license_check" { + name = "tgc-main-license-check" + description = "Main/release branch license check for terraform-google-conversion. 
Managed by Terraform https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/tf-validator/project.tf" + + provider = google-beta + project = local.project_id + github { + owner = "GoogleCloudPlatform" + name = "terraform-google-conversion" + push { + branch = "^(main|release-.+)$" + } + } + + filename = ".ci/cloudbuild-tests-go-licenses.yaml" +} + +resource "google_cloudbuild_trigger" "forseti_lint" { provider = google-beta project = local.project_id - description = "Lint tests on pull request for terraform-python-testing-helper" + description = "Lint tests on pull request for forseti" github { - owner = "GoogleCloudPlatform" - name = "terraform-python-testing-helper" + owner = "forseti-security" + name = "terraform-google-forseti" pull_request { - branch = ".*" + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" } } - filename = ".ci/cloudbuild.lint.yaml" + filename = "build/lint.cloudbuild.yaml" } -resource "google_cloudbuild_trigger" "tf_py_test_helper_test" { +resource "google_cloudbuild_trigger" "forseti_int" { provider = google-beta project = local.project_id - description = "Test on pull request for terraform-python-testing-helper" + description = "Integration tests on pull request for forseti" github { - owner = "GoogleCloudPlatform" - name = "terraform-python-testing-helper" + owner = "forseti-security" + name = "terraform-google-forseti" pull_request { - branch = ".*" + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" } } + substitutions = { + _BILLING_ACCOUNT = local.billing_account + _FOLDER_ID = local.forseti_ci_folder_id + _ORG_ID = local.org_id + } - filename = ".ci/cloudbuild.test.yaml" - included_files = [ - "**/*.tf", - "**/*.py" - ] + filename = "build/int.cloudbuild.yaml" } # example-foundation-int tests resource "google_cloudbuild_trigger" "example_foundations_int_trigger" { provider = google-beta project = local.project_id + name = 
"terraform-example-foundation-int-trigger-${each.value}" description = "Integration tests on pull request for example_foundations in ${each.value} mode" for_each = toset(local.example_foundation_int_test_modes) github { owner = values(local.example_foundation)[0]["gh_org"] name = keys(local.example_foundation)[0] pull_request { - branch = ".*" + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" } } substitutions = { @@ -161,5 +422,29 @@ resource "google_cloudbuild_trigger" "example_foundations_int_trigger" { } filename = "build/int.cloudbuild.yaml" - ignored_files = ["*.md", ".gitignore"] + ignored_files = ["**/*.md", "**/*.png", ".gitignore", ".github/**", "**/*.example.tfvars", "helpers/foundation-deployer/**"] +} + + +resource "google_cloudbuild_trigger" "bpt_int_trigger" { + provider = google-beta + project = local.project_id + name = "bpt-int-trigger" + description = "Integration tests on pull request for blueprint test framework" + github { + owner = "GoogleCloudPlatform" + name = "cloud-foundation-toolkit" + pull_request { + branch = ".*" + comment_control = "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" + } + } + substitutions = { + _BILLING_ACCOUNT = local.billing_account + _FOLDER_ID = data.terraform_remote_state.org.outputs.bpt_folder + _ORG_ID = local.org_id + } + + filename = "infra/blueprint-test/build/int.cloudbuild.yaml" + included_files = ["infra/blueprint-test/**"] } diff --git a/infra/terraform/test-org/ci-triggers/versions.tf b/infra/terraform/test-org/ci-triggers/versions.tf index 6906c006f61..34f1e6a9f7f 100644 --- a/infra/terraform/test-org/ci-triggers/versions.tf +++ b/infra/terraform/test-org/ci-triggers/versions.tf @@ -15,9 +15,15 @@ */ terraform { - required_version = ">= 0.12" + required_version = ">= 1.4.4" required_providers { - google-beta = "~> 2.18" + google-beta = { + source = "hashicorp/google-beta" + version = ">= 2.18, < 7" + } + google = { + source = "hashicorp/google" + version = ">= 2.18, 
< 7" + } } } - diff --git a/infra/terraform/test-org/github/.terraform.lock.hcl b/infra/terraform/test-org/github/.terraform.lock.hcl new file mode 100644 index 00000000000..173809811c1 --- /dev/null +++ b/infra/terraform/test-org/github/.terraform.lock.hcl @@ -0,0 +1,25 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/integrations/github" { + version = "6.4.0" + constraints = "~> 6.0" + hashes = [ + "h1:sJvuRMYWJ/ykZXTuoCuocHvx06hTwDVrXVVXq1814bw=", + "zh:00f431c2a2510efcb1115442dda5e90815bcb16e1a3301679ade0139fa963d3b", + "zh:12a862f4317b3cb65682c1b687650cd91eeee99e63774bdcfa8bcfc64bad097b", + "zh:226d5e09ff27f94cb9336089181d26f85cb30219b863a579597f2e107f37de49", + "zh:402ecaa5add568a52ee01d816810f3b90f693be35c680fcdc9b6284bf55326f1", + "zh:60e3bdd9fbefb3c1d790bc08889c1dc0e83636b82284faaa709411aa4f96bb9f", + "zh:625099eeff2f8aaecd22a24a451b326828435c8f9de86f2e5e99872e7b467fa7", + "zh:79e8b665421009df2260f50e10da1f7a7863b557ece96e2b07dfd2fad1e86fcd", + "zh:98e471fefc93dcfedeec750c694110db7d3331dc3a256191d30b9d2f70d12157", + "zh:a17702765e1fa92d1c288ddfd97075819ad61b344b341be7e09c554c841a6d9e", + "zh:ca72ccf40624ae26bf4660d8dd84a51638f0a1e78d5f19fdfaafaef97f838af6", + "zh:d009ab5527d45c44c424d26cd2eb51a5a6a6448f3fb1023b675789588cc08d64", + "zh:e5811be1e942a75b14dfcd3e03523d8df60cfbde0d7e24d75e78480a02a58949", + "zh:e6008ad28225ad6996b06bcd7f3070863329df406a56754e7fb9c31d6301ace4", + "zh:f1d93f56ea4f87183a5de4780704907605851d95a2d285a9ec755bf784c5569c", + "zh:fbd1fee2c9df3aa19cf8851ce134dea6e45ea01cb85695c1726670c285797e25", + ] +} diff --git a/infra/terraform/test-org/github/README.md b/infra/terraform/test-org/github/README.md index 4544861e42d..647376127c8 100644 --- a/infra/terraform/test-org/github/README.md +++ b/infra/terraform/test-org/github/README.md @@ -13,3 +13,17 @@ export GITHUB_TOKEN=aaaaaaa ``` Note, because of the many resources involved, you might need 
to run Terraform with `-refresh=false`. + + +## Inputs + +No inputs. + +## Outputs + +| Name | Description | +|------|-------------| +| labels | n/a | +| protected\_repos | n/a | + + diff --git a/infra/terraform/test-org/github/backend.tf b/infra/terraform/test-org/github/backend.tf index 6b40fc18f91..dff2fff2a55 100644 --- a/infra/terraform/test-org/github/backend.tf +++ b/infra/terraform/test-org/github/backend.tf @@ -21,10 +21,10 @@ terraform { } } -data "terraform_remote_state" "triggers" { +data "terraform_remote_state" "org" { backend = "gcs" config = { bucket = "cft-infra-test-tfstate" - prefix = "state/ci-triggers" + prefix = "state/org" } } diff --git a/infra/terraform/test-org/github/labels.tf b/infra/terraform/test-org/github/labels.tf index ac691c1be83..ccc3d397a5d 100644 --- a/infra/terraform/test-org/github/labels.tf +++ b/infra/terraform/test-org/github/labels.tf @@ -1,5 +1,5 @@ /** - * Copyright 2019 Google LLC + * Copyright 2019-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,28 +15,45 @@ */ locals { - repo_labels = { - for o in flatten([ - for repo in local.repos : - [ - for label in local.labels : - { - "repo" : repo, - "label" : label.name, - "color" : label.color, - "description" : label.description - } - ] - ]) : - "${o.repo}/${o.label}" => o - } + labels = [ + { + name : "enhancement", + color : "a2eeef", + description : "New feature or request" + }, + { + name : "bug", + color : "d73a4a" + description : "Something isn't working" + }, + { + name : "good first issue", + color : "7057ff" + description : "Good for newcomers" + }, + { + name : "triaged", + color : "322560", + description : "Scoped and ready for work" + }, + { + name : "release-please:force-run", + color : "e7d87d", + description : "Force release-please to check for changes." 
+ }, + ] } -# Create labels on all repos -resource "github_issue_label" "test_repo" { - for_each = local.repo_labels - repository = each.value.repo - name = each.value.label - color = each.value.color - description = each.value.description +module "repo_labels_gcp" { + source = "../../modules/repo_labels" + org = "GoogleCloudPlatform" + repo_list = [for k, v in module.repos_gcp.repos : k] + labels = local.labels +} + +module "repo_labels_tgm" { + source = "../../modules/repo_labels" + org = "terraform-google-modules" + repo_list = setunion([for k, v in module.repos_tgm.repos : k], ["terraform-docs-samples"]) + labels = local.labels } diff --git a/infra/terraform/test-org/github/main.tf b/infra/terraform/test-org/github/main.tf index cbdc066fce6..74e44220be5 100644 --- a/infra/terraform/test-org/github/main.tf +++ b/infra/terraform/test-org/github/main.tf @@ -15,113 +15,14 @@ */ locals { - gh_org = "terraform-google-modules" - repos = keys(data.terraform_remote_state.triggers.outputs.repo_folder) - labels = [ - { - name : "enhancement", - color : "a2eeef", - description : "New feature or request" - }, - { - name : "bug", - color : "d73a4a" - description : "Something isn't working" - }, - { - name : "duplicate", - color : "cfd3d7" - description : "This issue or pull request already exists" - }, - { - name : "good first issue", - color : "7057ff" - description : "Good for newcomers" - }, - { - name : "help wanted", - color : "008672", - description : "Extra attention is needed" - }, - { - name : "invalid", - color : "e4e669", - description : "Something doesn't seem right" - }, - { - name : "question", - color : "d876e3", - description : "Further information is requested" - }, - { - name : "wontfix", - color : "db643d", - description : "This will not be worked on" - }, - { - name : "triaged", - color : "322560", - description : "Scoped and ready for work" - }, - { - name : "upstream", - color : "B580D1", - description : "Work required on Terraform core or provider" - 
}, - { - name : "security", - color : "801336", - description : "Fixes a security vulnerability or lapse in best practice" - }, - { - name : "refactor", - color : "004445", - description : "Updates for readability, code cleanliness, DRYness, etc. Only needs Terraform exp." - }, - { - name : "blocked", - color : "ef4339", - description : "Blocked by some other work" - }, - { - name: "P1", - color: "b01111", - description: "highest priority issues" - }, - { - name: "P2", - color: "b4451f", - description: "high priority issues" - }, - { - name: "P3", - color: "e7d87d", - description: "medium priority issues" - }, - { - name: "P4", - color: "62a1db", - description: "low priority issues" - }, - { - name: "release-please:force-run", - color: "e7d87d", - description: "Force release-please to check for changes." - }, - { - name: "waiting-response", - color: "5319e7", - description: "Waiting for issue author to respond." - }, - { - name: "v0.13", - color: "edb761", - description: "Terraform v0.13 issue." - }, - ] + modules = data.terraform_remote_state.org.outputs.modules } provider "github" { - version = "~> 2.2" - organization = local.gh_org + owner = "terraform-google-modules" +} + +provider "github" { + alias = "gcp" + owner = "GoogleCloudPlatform" } diff --git a/infra/terraform/test-org/github/outputs.tf b/infra/terraform/test-org/github/outputs.tf index e7f25a684f4..7ad95a067c3 100644 --- a/infra/terraform/test-org/github/outputs.tf +++ b/infra/terraform/test-org/github/outputs.tf @@ -1,5 +1,5 @@ /** - * Copyright 2019 Google LLC + * Copyright 2019-2023 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,10 +14,10 @@ * limitations under the License. 
*/ -output "repos" { - value = local.repos -} - output "labels" { value = local.labels } + +output "protected_repos" { + value = setunion([for k, v in module.repos_gcp.repos : k], [for k, v in module.repos_tgm.repos : k]) +} diff --git a/infra/terraform/test-org/github/protection.tf b/infra/terraform/test-org/github/protection.tf new file mode 100644 index 00000000000..b4c2dd444f9 --- /dev/null +++ b/infra/terraform/test-org/github/protection.tf @@ -0,0 +1,194 @@ +/** + * Copyright 2022-2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +locals { + tgm_modules_map = { for value in local.modules : value.name => value if value.org == "terraform-google-modules" } + gcp_modules_map = { for value in local.modules : value.name => value if value.org == "GoogleCloudPlatform" } +} + +data "github_team" "cft-admins" { + slug = "cft-admins" + provider = github +} + +data "github_team" "blueprint-solutions" { + slug = "blueprint-solutions" + provider = github.gcp +} + +module "repos_tgm" { + source = "../../modules/repositories" + repos_map = local.tgm_modules_map + ci_teams = ["blueprint-contributors"] + providers = { github = github } +} + +module "repos_gcp" { + source = "../../modules/repositories" + repos_map = local.gcp_modules_map + ci_teams = ["blueprint-contributors"] + providers = { github = github.gcp } +} + +// All new repos are created in advance in the GCP org +import { + for_each = local.gcp_modules_map + to = module.repos_gcp.github_repository.repo[each.value.name] + id = each.value.name +} + +// terraform-example-foundation CI is a special case - below +module "branch_protection_tgm" { + source = "../../modules/branch_protection" + repo_list = { for k, v in module.repos_tgm.repos : k => v if k != "terraform-example-foundation" } + repos_map = local.tgm_modules_map + admin = data.github_team.cft-admins.node_id + providers = { github = github } +} + +module "branch_protection_gcp" { + source = "../../modules/branch_protection" + repo_list = module.repos_gcp.repos + repos_map = local.gcp_modules_map + admin = data.github_team.blueprint-solutions.node_id + providers = { github = github.gcp } +} + +// terraform-example-foundation renovate is a special case +module "renovate_json_tgm" { + source = "../../modules/repo_file" + repo_list = { for k, v in module.repos_tgm.repos : k => v if k != "terraform-example-foundation" } + filename = ".github/renovate.json" + content = file("${path.module}/resources/renovate-repo-config.json") + providers = { github = github } +} + +module 
"renovate_json_gcp" { + source = "../../modules/repo_file" + repo_list = module.repos_gcp.repos + filename = ".github/renovate.json" + content = file("${path.module}/resources/renovate-repo-config.json") + providers = { github = github.gcp } +} + +module "stale_yml_tgm" { + source = "../../modules/repo_file" + repo_list = module.repos_tgm.repos + filename = ".github/workflows/stale.yml" + content = file("${path.module}/resources/stale.yml") + providers = { github = github } +} + +module "stale_yml_gcp" { + source = "../../modules/repo_file" + repo_list = module.repos_gcp.repos + filename = ".github/workflows/stale.yml" + content = file("${path.module}/resources/stale.yml") + providers = { github = github.gcp } +} + +module "conventional-commit-lint_yaml_tgm" { + source = "../../modules/repo_file" + repo_list = module.repos_tgm.repos + filename = ".github/conventional-commit-lint.yaml" + content = file("${path.module}/resources/conventional-commit-lint.yaml") + providers = { github = github } +} + +module "conventional-commit-lint_yaml_gcp" { + source = "../../modules/repo_file" + repo_list = module.repos_gcp.repos + filename = ".github/conventional-commit-lint.yaml" + content = file("${path.module}/resources/conventional-commit-lint.yaml") + providers = { github = github.gcp } +} + +module "trusted-contribution_yml_tgm" { + source = "../../modules/repo_file" + repo_list = module.repos_tgm.repos + filename = ".github/trusted-contribution.yml" + content = file("${path.module}/resources/trusted-contribution.yml") + providers = { github = github } +} + +module "trusted-contribution_yml_gcp" { + source = "../../modules/repo_file" + repo_list = module.repos_gcp.repos + filename = ".github/trusted-contribution.yml" + content = file("${path.module}/resources/trusted-contribution.yml") + providers = { github = github.gcp } +} + +module "codeowners_tgm" { + source = "../../modules/codeowners_file" + org = "terraform-google-modules" + providers = { github = github } + owner = 
"cft-admins" + repos_map = local.tgm_modules_map + repo_list = module.repos_tgm.repos +} + +module "codeowners_gcp" { + source = "../../modules/codeowners_file" + org = "GoogleCloudPlatform" + providers = { github = github.gcp } + owner = "blueprint-solutions" + repos_map = local.gcp_modules_map + repo_list = module.repos_gcp.repos +} + +module "lint_yaml_tgm" { + source = "../../modules/workflow_files" + repos_map = local.tgm_modules_map + repo_list = module.repos_tgm.repos + providers = { github = github } +} + +module "lint_yaml_gcp" { + source = "../../modules/workflow_files" + repos_map = local.gcp_modules_map + repo_list = module.repos_gcp.repos + providers = { github = github.gcp } +} + +# Special CI/branch protection case + +resource "github_branch_protection" "terraform-example-foundation" { + repository_id = module.repos_tgm.repos["terraform-example-foundation"].node_id + pattern = module.repos_tgm.repos["terraform-example-foundation"].default_branch + + required_pull_request_reviews { + required_approving_review_count = 1 + require_code_owner_reviews = true + } + + required_status_checks { + strict = true + contexts = [ + "cla/google", + "terraform-example-foundation-int-trigger-default (cloud-foundation-cicd)", + "terraform-example-foundation-int-trigger-HubAndSpoke (cloud-foundation-cicd)", + "lint", + "conventionalcommits.org" + ] + } + + enforce_admins = false + + restrict_pushes { + push_allowances = [data.github_team.cft-admins.node_id] + } +} diff --git a/infra/terraform/test-org/github/resources/conventional-commit-lint.yaml b/infra/terraform/test-org/github/resources/conventional-commit-lint.yaml new file mode 100644 index 00000000000..ee8e163717f --- /dev/null +++ b/infra/terraform/test-org/github/resources/conventional-commit-lint.yaml @@ -0,0 +1,19 @@ +# Copyright 2022-2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: This file is automatically generated from: +# https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/github + +enabled: true +always_check_pr_title: true diff --git a/infra/terraform/test-org/github/resources/renovate-repo-config.json b/infra/terraform/test-org/github/resources/renovate-repo-config.json new file mode 100644 index 00000000000..715e64bdeec --- /dev/null +++ b/infra/terraform/test-org/github/resources/renovate-repo-config.json @@ -0,0 +1,4 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": ["github>GoogleCloudPlatform/cloud-foundation-toolkit//infra/terraform/test-org/github/resources/renovate"] +} diff --git a/infra/terraform/test-org/github/resources/renovate.json b/infra/terraform/test-org/github/resources/renovate.json new file mode 100644 index 00000000000..3310c233602 --- /dev/null +++ b/infra/terraform/test-org/github/resources/renovate.json @@ -0,0 +1,83 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended", + ":semanticCommits", + ":preserveSemverRanges", + ":rebaseStalePrs" + ], + "minimumReleaseAge": "7 days", + "ignorePaths": [".github/workflows/lint.yaml", ".github/workflows/stale.yml"], + "labels": ["dependencies"], + "vulnerabilityAlerts": { + "labels": ["type:security"], + "minimumReleaseAge": "0 days" + }, + "constraints": { + "go": "1.23" + }, + "packageRules": [ + { + "matchFileNames": ["*", "modules/**"], + "commitMessagePrefix": "fix(deps):" + }, + { + "matchFileNames": ["*", 
"modules/**"], + "matchUpdateTypes": "major", + "commitMessagePrefix": "fix(deps)!:" + }, + { + "matchFileNames": ["examples/**", "test/**", ".github/**", "infra/**", "build/**", "Makefile"], + "commitMessagePrefix": "chore(deps):" + }, + { + "matchManagers": ["terraform"], + "matchDepTypes": ["module"], + "groupName": "terraform modules", + "matchUpdateTypes": ["minor", "patch"] + }, + { + "matchManagers": ["gomod"], + "postUpdateOptions": ["gomodTidy", "gomodUpdateImportPaths"] + }, + { + "matchManagers": ["gomod"], + "matchDatasources": ["golang-version"], + "rangeStrategy": "replace", + "allowedVersions": "1.23", + "postUpdateOptions": ["gomodTidy", "gomodUpdateImportPaths"] + }, + { + "matchManagers": ["regex", "gomod"], + "matchFileNames": ["!(modules/**)"], + "groupName": "go modules and/or dev-tools" + }, + { + "matchManagers": ["terraform"], + "matchDepNames": ["google", "google-beta"], + "groupName": "Terraform Google Provider", + "rangeStrategy": "widen", + "commitMessagePrefix": "feat(deps):" + } + ], + "regexManagers": [ + { + "fileMatch": ["(^|/)Makefile$"], + "matchStrings": [ + "DOCKER_TAG_VERSION_DEVELOPER_TOOLS := (?.*?)\\n" + ], + "datasourceTemplate": "docker", + "registryUrlTemplate": "https://gcr.io/cloud-foundation-cicd", + "depNameTemplate": "cft/developer-tools" + }, + { + "fileMatch": ["(^|/)build/(int|lint)\\.cloudbuild\\.yaml$"], + "matchStrings": [ + " _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '(?.*?)'\\n" + ], + "datasourceTemplate": "docker", + "registryUrlTemplate": "https://gcr.io/cloud-foundation-cicd", + "depNameTemplate": "cft/developer-tools" + } + ] +} diff --git a/infra/terraform/test-org/github/resources/stale.yml b/infra/terraform/test-org/github/resources/stale.yml new file mode 100644 index 00000000000..34a5677cdec --- /dev/null +++ b/infra/terraform/test-org/github/resources/stale.yml @@ -0,0 +1,34 @@ +# Copyright 2022-2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: This file is automatically generated from: +# https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/github + +name: "Close stale issues" +on: + schedule: + - cron: "0 23 * * *" + +jobs: + stale: + if: github.repository_owner == 'GoogleCloudPlatform' || github.repository_owner == 'terraform-google-modules' + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 7 days' + stale-pr-message: 'This PR is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 7 days' + exempt-issue-labels: 'triaged' + exempt-pr-labels: 'dependencies,autorelease: pending' diff --git a/infra/terraform/test-org/github/resources/trusted-contribution.yml b/infra/terraform/test-org/github/resources/trusted-contribution.yml new file mode 100644 index 00000000000..a3b4ff268ba --- /dev/null +++ b/infra/terraform/test-org/github/resources/trusted-contribution.yml @@ -0,0 +1,26 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: This file is automatically generated from: +# https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/blob/master/infra/terraform/test-org/github + +annotations: + - type: comment + text: "/gcbrun" +trustedContributors: + - release-please[bot] + - renovate[bot] + - renovate-bot + - forking-renovate[bot] + - dependabot[bot] diff --git a/infra/terraform/test-org/github/versions.tf b/infra/terraform/test-org/github/versions.tf index 1fe4caaac61..eca0ff7296c 100644 --- a/infra/terraform/test-org/github/versions.tf +++ b/infra/terraform/test-org/github/versions.tf @@ -1,5 +1,5 @@ /** - * Copyright 2019 Google LLC + * Copyright 2019-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,5 +15,11 @@ */ terraform { - required_version = "~> 0.12.0" + required_version = ">= 1.7.0" + required_providers { + github = { + source = "integrations/github" + version = "~> 6.0" + } + } } diff --git a/infra/terraform/test-org/image-cleanup/cft-image-cleanup.sh b/infra/terraform/test-org/image-cleanup/cft-image-cleanup.sh new file mode 100755 index 00000000000..e687b0042eb --- /dev/null +++ b/infra/terraform/test-org/image-cleanup/cft-image-cleanup.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +IFS=$'\n\t' +set -eou pipefail +MODE="DRYRUN" + +if [[ "$#" -lt 1 || "${1}" == '-h' || "${1}" == '--help' ]]; then + cat >&2 <<"EOF" +cft-image-cleanup.sh cleans up untagged cft-dev-tool images. +USAGE: + cft-image-cleanup.sh REPOSITORY [DELETE] + e.g. 
$ ./cft-image-cleanup.sh gcr.io/cloud-foundation-cicd/cft/developer-tools DELETE + would delete all image digests that do not have a tag in the gcr.io/cloud-foundation-cicd/cft/developer-tools repository +EOF + exit 1 +fi + +main(){ + local C=0 + IMAGE="${1}" + for digest in $(gcloud container images list-tags "${IMAGE}" --limit=999999 --sort-by=TIMESTAMP \ + --format='get(digest)' --filter='-tags:*'); do + if [[ "$MODE" == "DRYRUN" ]]; then + echo "to delete: $digest" + elif [[ "$MODE" == "DELETE" ]]; then + ( + set -x + gcloud container images delete -q --force-delete-tags "${IMAGE}@${digest}" + ) + fi + (( C=C+1 )) + done + echo "Deleted ${C} images in ${IMAGE}." >&2 +} + +if [[ "$#" -eq 1 ]]; then + echo ">>> executing in DRY RUN mode; use the DELETE arg for deleting the images <<<" +elif [[ "$#" -eq 2 && "${2}" == 'DELETE' ]]; then + MODE="DELETE" +fi +main "${1}" diff --git a/infra/terraform/test-org/image-cleanup/cloudbuild.yaml b/infra/terraform/test-org/image-cleanup/cloudbuild.yaml new file mode 100644 index 00000000000..e43e2f93e7a --- /dev/null +++ b/infra/terraform/test-org/image-cleanup/cloudbuild.yaml @@ -0,0 +1,65 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +steps: + - name: "gcr.io/cloud-builders/gcloud" + dir: "infra/terraform/test-org/image-cleanup" + id: "purge-dev-tools" + waitFor: ["-"] + entrypoint: "bash" + args: + [ + "./cft-image-cleanup.sh", + "$_REGISTRY_URL/$_DOCKER_IMAGE_DEVELOPER_TOOLS", + "DELETE", + ] + - name: "gcr.io/cloud-builders/gcloud" + dir: "infra/terraform/test-org/image-cleanup" + id: "purge-dev-tools-light" + waitFor: ["-"] + entrypoint: "bash" + args: + [ + "./cft-image-cleanup.sh", + "$_REGISTRY_URL/$_DOCKER_IMAGE_DEVELOPER_TOOLS_LIGHT", + "DELETE", + ] + - name: "gcr.io/cloud-builders/gcloud" + dir: "infra/terraform/test-org/image-cleanup" + id: "purge-dev-tool-krm" + waitFor: ["-"] + entrypoint: "bash" + args: + [ + "./cft-image-cleanup.sh", + "$_REGISTRY_URL/$_DOCKER_IMAGE_DEVELOPER_TOOLS_KRM", + "DELETE", + ] + - name: "gcr.io/cloud-builders/gcloud" + dir: "infra/terraform/test-org/image-cleanup" + id: "purge-dev-tools-jenkins" + waitFor: ["-"] + entrypoint: "bash" + args: + [ + "./cft-image-cleanup.sh", + "$_REGISTRY_URL/$_DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS", + "DELETE", + ] +substitutions: + _REGISTRY_URL: "gcr.io/cloud-foundation-cicd" + _DOCKER_IMAGE_DEVELOPER_TOOLS: "cft/developer-tools" + _DOCKER_IMAGE_DEVELOPER_TOOLS_LIGHT: "cft/developer-tools-light" + _DOCKER_IMAGE_DEVELOPER_TOOLS_KRM: "cft/developer-tools-krm" + _DOCKER_IMAGE_DEVELOPER_TOOLS_JENKINS: "cft/developer-tools-jenkins" diff --git a/infra/terraform/test-org/org-iam-policy/.terraform.lock.hcl b/infra/terraform/test-org/org-iam-policy/.terraform.lock.hcl new file mode 100644 index 00000000000..ccaf5262caa --- /dev/null +++ b/infra/terraform/test-org/org-iam-policy/.terraform.lock.hcl @@ -0,0 +1,32 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/google" { + version = "6.5.0" + constraints = ">= 3.39.0, < 7.0.0" + hashes = [ + "h1:7G5Ksk/gin2QBocux+AGihR8PFBi97Uegb1mnCpgp2U=", + "h1:Bn62acdFX335QgaXxzi1nV/Y3IR7MnlUo4o9WJqFmVM=", + "h1:EK0N1nYGk+qSbuyf1yr6K/sWuak90U6c2oeoRmByIk0=", + "h1:LSHH24FXWii7HkBd7vjh0n/pTgi57utcd90GLQ57xAA=", + "h1:MfrVKPYK6guw9lduDwnqUQs70z9DYAcZO6tE87pt8no=", + "h1:azHVE+umZeXbFICAk746gBiQTQ4/98FqtUio3z5CJEk=", + "h1:nGxZkoMD8HRfpNkHWb7exCEasQVFncIgL9UHFSsa1cY=", + "h1:p18nqSzt1XjrMFoGSN5486MHcJcIjcEm64S1ZX2yqRE=", + "h1:p27W4iBQHrPuKEDzL11GikNjoM5HtsShMCFkojx4m1A=", + "h1:qKHzN9B+9uOjHDXxanQJanBdsd0bzCP3egUjIcyYxSY=", + "h1:r8xOzBEmymR2UJiW2HhFqA0WyZselrKpa93RXjpQZg8=", + "zh:14101a38e880d4a1ef14c0a24476b008a3b577540a260a246a471bcfb5c31f4c", + "zh:478a7b0810956d39843e785262ab8162324a7412c3f6cf1ceb43977e2c05f22e", + "zh:6c9b583abcbaa2093b1b55494ac08851bd3364919fe86850a9c3e8f6c46851d4", + "zh:7c400eb5488221ba7ea48725ab43db1464cefd96cb29a24e63fe1950666b465f", + "zh:82931b2c186403753356a73878d36efc209c9e5ae46d0b609bb7ca38aece931d", + "zh:87e7966ef7067de3684f658251cdede057be419bbfeaaad935ab6f501024046a", + "zh:a2f4aaa3b9260732a53f78c8053eb2cbcee2abf11d3d245c58f3065423ad30ab", + "zh:bbc4c3ca9d51287e77130fc95880792007dd919b9b5396433f9eed737119c6c3", + "zh:edcda54d37be1b8d4cbe029e30df6a228e0be3887831b892c11536502d87e840", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:f95d92ca2ac527442f6767a217b8a557ba6b2d1915c39efba412382e986e4c3e", + "zh:f96148c3742126219b810a687551284460a8d78aa66efbfd2c58880f48049dda", + ] +} diff --git a/infra/terraform/test-org/org-iam-policy/cloudbuild.yaml b/infra/terraform/test-org/org-iam-policy/cloudbuild.yaml index 32be04383a5..20391bf1ddc 100644 --- a/infra/terraform/test-org/org-iam-policy/cloudbuild.yaml +++ b/infra/terraform/test-org/org-iam-policy/cloudbuild.yaml @@ -30,4 +30,4 @@ steps: args: ['apply', '--auto-approve'] substitutions: _DOCKER_IMAGE_DEVELOPER_TOOLS: 
'cft/developer-tools' - _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.14' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '1.18' diff --git a/infra/terraform/test-org/org-iam-policy/iam.tf b/infra/terraform/test-org/org-iam-policy/iam.tf index 581c5eb7ecc..2c70c7fb310 100644 --- a/infra/terraform/test-org/org-iam-policy/iam.tf +++ b/infra/terraform/test-org/org-iam-policy/iam.tf @@ -16,12 +16,17 @@ locals { - cft_ci_group = "cft-ci-robots@test.infra.cft.tips" - cft_dev_group = "cft-developers@dev.infra.cft.tips" - gcp_admins_group_test = "gcp-admins@test.infra.cft.tips" + cft_ci_group = "cft-ci-robots@test.blueprints.joonix.net" + cft_dev_group = "cft-developers@develop.blueprints.joonix.net" + gcp_admins_group_test = "gcp-admins@test.blueprints.joonix.net" project_cleaner = "project-cleaner-function@${data.terraform_remote_state.project_cleaner.outputs.project_id}.iam.gserviceaccount.com" + billing_admin_group = "billing-admin@test.blueprints.joonix.net" + + ci_gsuite_sa = "ci-gsuite-sa@ci-gsuite-sa-project.iam.gserviceaccount.com" + cft_admin = "cft-admin@test.blueprints.joonix.net" + foundation_leads_group = "cloud-foundation-leads@google.com" + policy = { - "roles/accesscontextmanager.policyAdmin" : ["serviceAccount:project-factory-manager@gcp-foundation-shared-devops.iam.gserviceaccount.com"], "roles/billing.admin" : ["group:${local.gcp_admins_group_test}"], "roles/compute.xpnAdmin" : ["group:${local.cft_ci_group}"], "roles/containeranalysis.admin" : ["group:${local.cft_ci_group}"], @@ -29,15 +34,72 @@ locals { "roles/resourcemanager.folderAdmin" : ["group:${local.gcp_admins_group_test}"], "roles/resourcemanager.folderViewer" : ["serviceAccount:${local.project_cleaner}"], "roles/resourcemanager.lienModifier" : ["serviceAccount:${local.project_cleaner}"], - "roles/resourcemanager.organizationAdmin" : ["group:${local.cft_ci_group}", "group:${local.gcp_admins_group_test}", ], + "roles/resourcemanager.organizationAdmin" : [ + "group:${local.cft_ci_group}", + 
"group:${local.gcp_admins_group_test}", + "serviceAccount:${data.google_secret_manager_secret_version.org-admin-sa.secret_data}", + ], + "roles/iam.organizationRoleAdmin" : ["serviceAccount:${data.google_secret_manager_secret_version.org-role-admin-sa.secret_data}", ], "roles/resourcemanager.organizationViewer" : ["group:${local.cft_ci_group}"], "roles/resourcemanager.projectDeleter" : ["serviceAccount:${local.project_cleaner}"], "roles/owner" : ["group:${local.gcp_admins_group_test}", "serviceAccount:${local.project_cleaner}"], "roles/browser" : ["group:${local.cft_dev_group}"], - "roles/viewer" : ["group:${local.cft_dev_group}"] + "roles/viewer" : ["group:${local.cft_dev_group}", "serviceAccount:${local.project_cleaner}"], + "roles/compute.orgSecurityPolicyAdmin" : ["serviceAccount:${local.project_cleaner}"], + "roles/compute.orgSecurityResourceAdmin" : ["serviceAccount:${local.project_cleaner}"], + "roles/resourcemanager.folderEditor" : ["serviceAccount:${local.project_cleaner}"], + "roles/serviceusage.serviceUsageAdmin" : ["serviceAccount:${local.project_cleaner}"], + "roles/accesscontextmanager.policyReader" : ["group:${local.cft_ci_group}"], + "roles/assuredworkloads.admin" : ["group:${local.cft_ci_group}"], + "roles/iam.denyAdmin" : ["group:${local.cft_ci_group}"], + "roles/resourcemanager.tagAdmin" : ["group:${local.cft_ci_group}"], + } + + billing_policy = { + "roles/billing.admin" : [ + "group:${local.cft_ci_group}", + "group:${local.gcp_admins_group_test}", + "user:${local.cft_admin}", + "group:${local.foundation_leads_group}", + "group:${data.google_secret_manager_secret_version.ba-admin-1.secret_data}", + "group:${data.google_secret_manager_secret_version.ba-admin-2.secret_data}", + "group:${local.billing_admin_group}", + ], + "roles/logging.configWriter" : [ + "serviceAccount:${local.project_cleaner}", + "user:${local.cft_admin}", + ], + "roles/billing.user" : concat([ + "serviceAccount:${local.ci_gsuite_sa}", + ], 
jsondecode(data.google_storage_bucket_object_content.ba-users.content)) } } +data "google_secret_manager_secret_version" "org-admin-sa" { + project = "cloud-foundation-cicd" + secret = "org-admin-sa" +} + +data "google_secret_manager_secret_version" "org-role-admin-sa" { + project = "cloud-foundation-cicd" + secret = "org-role-admin-sa" +} + +data "google_secret_manager_secret_version" "ba-admin-1" { + project = "cloud-foundation-cicd" + secret = "ba-admin-1" +} + +data "google_secret_manager_secret_version" "ba-admin-2" { + project = "cloud-foundation-cicd" + secret = "ba-admin-2" +} + +data "google_storage_bucket_object_content" "ba-users" { + name = "ba-users.json" + bucket = "tf-data-199f44ed6f9a7f22" +} + resource "google_organization_iam_policy" "organization" { org_id = data.terraform_remote_state.org.outputs.org_id policy_data = data.google_iam_policy.admin.policy_data @@ -52,3 +114,18 @@ data "google_iam_policy" "admin" { } } } + +resource "google_billing_account_iam_policy" "billing" { + billing_account_id = data.terraform_remote_state.org.outputs.billing_account + policy_data = data.google_iam_policy.billing.policy_data +} + +data "google_iam_policy" "billing" { + dynamic "binding" { + for_each = local.billing_policy + content { + role = binding.key + members = binding.value + } + } +} diff --git a/infra/terraform/test-org/org-iam-policy/versions.tf b/infra/terraform/test-org/org-iam-policy/versions.tf index 29ddd8870ad..2b2acc63e6f 100644 --- a/infra/terraform/test-org/org-iam-policy/versions.tf +++ b/infra/terraform/test-org/org-iam-policy/versions.tf @@ -15,9 +15,11 @@ */ terraform { - required_version = ">= 0.12" -} - -provider "google" { - version = "~> 3.39" + required_version = ">= 1.4.4" + required_providers { + google = { + source = "hashicorp/google" + version = ">= 3.39, < 7" + } + } } diff --git a/infra/terraform/test-org/org/.terraform.lock.hcl b/infra/terraform/test-org/org/.terraform.lock.hcl new file mode 100644 index 
00000000000..ed08a4c18a2 --- /dev/null +++ b/infra/terraform/test-org/org/.terraform.lock.hcl @@ -0,0 +1,165 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/external" { + version = "2.3.4" + constraints = ">= 1.2.0, >= 2.2.2, < 3.0.0" + hashes = [ + "h1:XWkRZOLKMjci9/JAtE8X8fWOt7A4u+9mgXSUjc4Wuyo=", + "zh:037fd82cd86227359bc010672cd174235e2d337601d4686f526d0f53c87447cb", + "zh:0ea1db63d6173d01f2fa8eb8989f0809a55135a0d8d424b08ba5dabad73095fa", + "zh:17a4d0a306566f2e45778fbac48744b6fd9c958aaa359e79f144c6358cb93af0", + "zh:298e5408ab17fd2e90d2cd6d406c6d02344fe610de5b7dae943a58b958e76691", + "zh:38ecfd29ee0785fd93164812dcbe0664ebbe5417473f3b2658087ca5a0286ecb", + "zh:59f6a6f31acf66f4ea3667a555a70eba5d406c6e6d93c2c641b81d63261eeace", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:ad0279dfd09d713db0c18469f585e58d04748ca72d9ada83883492e0dd13bd58", + "zh:c69f66fd21f5e2c8ecf7ca68d9091c40f19ad913aef21e3ce23836e91b8cbb5f", + "zh:d4a56f8c48aa86fc8e0c233d56850f5783f322d6336f3bf1916e293246b6b5d4", + "zh:f2b394ebd4af33f343835517e80fc876f79361f4688220833bc3c77655dd2202", + "zh:f31982f29f12834e5d21e010856eddd19d59cd8f449adf470655bfd19354377e", + ] +} + +provider "registry.terraform.io/hashicorp/google" { + version = "6.4.0" + constraints = ">= 3.19.0, >= 3.39.0, >= 3.43.0, >= 3.53.0, >= 4.28.0, >= 5.41.0, >= 6.0.0, < 7.0.0" + hashes = [ + "h1:+Xl/dWoAhhZ7GRPZwv7PCpnGa0MFGXyGesd9XxY+GeU=", + "zh:082e343d678da7bc8429c718b0251fc645a76b4d9b96a2cf669de02faa46c721", + "zh:117b781102aef79f63851bcb00e63d999d6b53ca46aac3f992107621c1058e47", + "zh:27bb144de4782ccc718485e033bfc7701ac36a3ee25ec41e4810a777d4fd083d", + "zh:3e0a05de8eb33bebb97947a515ae49760874ce30ff8601c79e8a4a38ca2b2510", + "zh:488777668eb61bdb4d5e949fc1f48a4c07a83f99c749a0b443be4908545bd412", + "zh:56f6a9d817dcb5754f377fae45e0ce0973a4619ee2eb26c8fdb933485ccc89e5", + 
"zh:5ed4a502834c5596e47969ad9bd646ff8c3c29d8aaaf75dfbd5623a577400a8d", + "zh:a0e971185ea15a62b505ccd8601fd16c1acf2744c51edc5a2cb151690055421c", + "zh:a2bf68d36c9ff401f554292cd4ace96443d1f1fb2dc11f95aa361a62c99dbc03", + "zh:c63f940a43258ba9aa95d7cc99104b12736f5ac76633009a5ad3c39335325a5c", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:fa41ab733169e962cd6f26bdcd295823290905e0afba97d68f12a028066b7cf3", + ] +} + +provider "registry.terraform.io/hashicorp/google-beta" { + version = "6.4.0" + constraints = ">= 3.19.0, >= 3.43.0, >= 4.11.0, >= 5.41.0, < 7.0.0" + hashes = [ + "h1:Iw3ruA/vLW0tWeGjtGYOC7Hv8cpQMcBP4nJggDhEwNQ=", + "zh:1999f091bf66ac63fbe23db2052c17cca92bcfadee1f593facba0606383c8dda", + "zh:26c80110366559ac713e8c94b967e27c0aae22a65f87b837b9e224acf4627b04", + "zh:3c847dd2816e297a8237b4951617d11723ee22645a4e10cf4a4ea20935bd53bb", + "zh:3dfbb433e1bf568f9658858f515ae17df41228a7a258920a5545fb19e46ac976", + "zh:422273df0ed56eaf7bb503d569c14b9df221ff0ac3f1d03400fa39b3becb9d32", + "zh:53353c7fa3a03ae7719da4a6f47a30aed0376d4b357d4987620a588474acd59a", + "zh:5d9e536fecf81f71e8ebb47150996b87f84f2ebdbb81e7eee32d490494fc8472", + "zh:9853c86c6f02517fd80c9fc57d2fd116a79fcd658670de9f1bdeeb7e74c2671d", + "zh:df97f71abdfc756ff9a5640d6d0b44ab7b3ce5ed237fc97bc8d77483f01ebed8", + "zh:e07a339f045c7c816ddc9dbb020796e3e3c956d0097ae98a09bdd7835446933e", + "zh:f07842ce106f8528f5c3e69a1b3128328a1ed4c7f16e84b7683d931ac5bcac00", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/kubernetes" { + version = "2.32.0" + constraints = ">= 2.13.0, ~> 2.13, < 3.0.0" + hashes = [ + "h1:HqeU0sZBh+2loFYqPMFx7jJamNUPEykyqJ9+CkMCYE0=", + "zh:0e715d7fb13a8ad569a5fdc937b488590633f6942e986196fdb17cd7b8f7720e", + "zh:495fc23acfe508ed981e60af9a3758218b0967993065e10a297fdbc210874974", + "zh:4b930a8619910ef528bc90dae739cb4236b9b76ce41367281e3bc3cf586101c7", + 
"zh:5344405fde7b1febf0734052052268ee24e7220818155702907d9ece1c0697c7", + "zh:92ee11e8c23bbac3536df7b124456407f35c6c2468bc0dbab15c3fc9f414bd0e", + "zh:a45488fe8d5bb59c49380f398da5d109a4ac02ebc10824567dabb87f6102fda8", + "zh:a4a0b57cf719a4c91f642436882b7bea24d659c08a5b6f4214ce4fe6a0204caa", + "zh:b7a27a6d11ba956a2d7b0f7389a46ec857ebe46ae3aeee537250e66cac15bf03", + "zh:bf94ce389028b686bfa70a90f536e81bb776c5c20ab70138bbe5c3d0a04c4253", + "zh:d965b2608da0212e26a65a0b3f33c5baae46cbe839196be15d93f70061516908", + "zh:f441fc793d03057a17af8bdca8b26d54916645bc5c148f54e22a54ed39089e83", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/null" { + version = "3.2.3" + constraints = ">= 2.1.0, < 4.0.0" + hashes = [ + "h1:+AnORRgFbRO6qqcfaQyeX80W0eX3VmjadjnUFUJTiXo=", + "zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2", + "zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d", + "zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3", + "zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f", + "zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301", + "zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670", + "zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed", + "zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65", + "zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd", + "zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.6.3" + constraints = ">= 2.1.0, >= 2.2.0, >= 2.3.1, >= 3.0.0, < 4.0.0" + hashes = [ + "h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=", + "zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451", 
+ "zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8", + "zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe", + "zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1", + "zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36", + "zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30", + "zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615", + "zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad", + "zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556", + "zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0", + ] +} + +provider "registry.terraform.io/hashicorp/time" { + version = "0.12.1" + constraints = ">= 0.5.0" + hashes = [ + "h1:6BhxSYBJdBBKyuqatOGkuPKVenfx6UmLdiI13Pb3his=", + "zh:090023137df8effe8804e81c65f636dadf8f9d35b79c3afff282d39367ba44b2", + "zh:26f1e458358ba55f6558613f1427dcfa6ae2be5119b722d0b3adb27cd001efea", + "zh:272ccc73a03384b72b964918c7afeb22c2e6be22460d92b150aaf28f29a7d511", + "zh:438b8c74f5ed62fe921bd1078abe628a6675e44912933100ea4fa26863e340e9", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:85c8bd8eefc4afc33445de2ee7fbf33a7807bc34eb3734b8eefa4e98e4cddf38", + "zh:98bbe309c9ff5b2352de6a047e0ec6c7e3764b4ed3dfd370839c4be2fbfff869", + "zh:9c7bf8c56da1b124e0e2f3210a1915e778bab2be924481af684695b52672891e", + "zh:d2200f7f6ab8ecb8373cda796b864ad4867f5c255cff9d3b032f666e4c78f625", + "zh:d8c7926feaddfdc08d5ebb41b03445166df8c125417b28d64712dccd9feef136", + "zh:e2412a192fc340c61b373d6c20c9d805d7d3dee6c720c34db23c2a8ff0abd71b", + "zh:e6ac6bba391afe728a099df344dbd6481425b06d61697522017b8f7a59957d44", + ] +} + +provider "registry.terraform.io/integrations/github" { + version = "6.3.0" + constraints = "~> 6.0" + hashes = [ + 
"h1:LEs8NwSWwYGHxmbJvGT1w3XeAM6pogAmskY8XavuWDs=", + "zh:04fe3b820fe8c247b98b9d6810b8bb84d3e8ac08054faf450c42489815ef4bfa", + "zh:24096b2d16208d1411a58bdb8df8cd9f0558fb9054ffeb95c4e7e90a9a34f976", + "zh:2b27332adf8d08fbdc08b5f55e87691bce02c311219e6deb39c08753bd93db6d", + "zh:335dd6c2d50fcdce2ef0cc194465fdf9df1f5fdecc805804c78df30a4eb2e11e", + "zh:383a6879565969dbdf5405b651cd870c09c615dbd3df2554e5574d39d161c98c", + "zh:4903038a6bc605f372e1569695db4a2e2862e1fc6cf4faf9e13c5f8f4fa2ed94", + "zh:4cc4dffbee8b28102d38abe855b7440d4f4226261b43fda2ec289b48c3de1537", + "zh:57c30c6fe0b64fa86906700ceb1691562b62f2b1ef0404952aeb4092acb6acb3", + "zh:7bf518396fb00e4f55c406f2ffb5583b43278682a92f0864a0c47e3a74627bbb", + "zh:93c2c5cb90f74ad3c0874b7f7d8a866f28a852f0eda736c6aef8ce65d4061f4d", + "zh:9562a82a6193a2db110fb34d1aceeedb27c0a640058dce9c31b37b17eeb5f4e7", + "zh:ac97f2d111703a219f27fcbf5e89460ea98f9168badcc0913c8b214a37f76814", + "zh:c882af4d33b761ec198cedac212ab1c114d97540119dc97daca38021ab3edd0a", + "zh:c9ffd0a37f07a93af02a1caa90bfbea27a952d3e5badf4aab866ec71cdb184a3", + "zh:fbd1fee2c9df3aa19cf8851ce134dea6e45ea01cb85695c1726670c285797e25", + ] +} diff --git a/infra/terraform/test-org/org/bigquery_external_data.tf b/infra/terraform/test-org/org/bigquery_external_data.tf index bc293638d59..e70eb7f982d 100644 --- a/infra/terraform/test-org/org/bigquery_external_data.tf +++ b/infra/terraform/test-org/org/bigquery_external_data.tf @@ -21,13 +21,13 @@ resource "google_folder" "ci_bq_external_data_folder" { module "ci_bq_external_data_project" { source = "terraform-google-modules/project-factory/google" - version = "~> 4.0" + version = "~> 17.0" name = "ci-bq-external-data-project" project_id = "ci-bq-external-data-project" org_id = local.org_id folder_id = google_folder.ci_bq_external_data_folder.id - billing_account = local.billing_account + billing_account = local.old_billing_account labels = { cft-ci = "permanent" @@ -39,8 +39,9 @@ module "ci_bq_external_data_project" { } 
resource "google_storage_bucket" "ci_bq_external_data_storage_bucket" { - name = "ci-bq-external-data" - project = module.ci_bq_external_data_project.project_id + name = "ci-bq-external-data" + project = module.ci_bq_external_data_project.project_id + location = "US" } resource "google_storage_bucket_iam_member" "ci_bq_external_data_storage_bucket_member" { @@ -65,4 +66,4 @@ resource "google_storage_bucket_object" "ci_bq_external_hive_file_bar" { name = "hive_partition_example/year=2013/bar.csv" source = "external_data/bar.csv" bucket = google_storage_bucket.ci_bq_external_data_storage_bucket.name -} \ No newline at end of file +} diff --git a/infra/terraform/test-org/org/external_data/bar.csv b/infra/terraform/test-org/org/external_data/bar.csv index f021349f9d0..80b5b4dab7d 100644 --- a/infra/terraform/test-org/org/external_data/bar.csv +++ b/infra/terraform/test-org/org/external_data/bar.csv @@ -1,3 +1,3 @@ id,name,dept 3,Amy,SC -4,Jacob,SC \ No newline at end of file +4,Jacob,SC diff --git a/infra/terraform/test-org/org/external_data/bigquery-external-table-test.csv b/infra/terraform/test-org/org/external_data/bigquery-external-table-test.csv index be5f7fc8b76..b7d58cb4e3c 100644 --- a/infra/terraform/test-org/org/external_data/bigquery-external-table-test.csv +++ b/infra/terraform/test-org/org/external_data/bigquery-external-table-test.csv @@ -1,2 +1,2 @@ column_a,column_b -foo,bar \ No newline at end of file +foo,bar diff --git a/infra/terraform/test-org/org/external_data/foo.csv b/infra/terraform/test-org/org/external_data/foo.csv index 51a70e286b7..573cbaa462c 100644 --- a/infra/terraform/test-org/org/external_data/foo.csv +++ b/infra/terraform/test-org/org/external_data/foo.csv @@ -1,3 +1,3 @@ id,name,dept 1,Keith,TP -2,Madeline,HR \ No newline at end of file +2,Madeline,HR diff --git a/infra/terraform/test-org/org/folders.tf b/infra/terraform/test-org/org/folders.tf index aff6964ecb7..acee3ae74ea 100644 --- a/infra/terraform/test-org/org/folders.tf +++ 
b/infra/terraform/test-org/org/folders.tf @@ -16,7 +16,7 @@ module "folders-root" { source = "terraform-google-modules/folders/google" - version = "~> 2.0" + version = "~> 5.0" parent = "organizations/${local.org_id}" @@ -30,11 +30,21 @@ module "folders-root" { module "folders-ci" { source = "terraform-google-modules/folders/google" - version = "~> 2.0" + version = "~> 5.0" parent = "folders/${replace(local.folders["ci-projects"], "folders/", "")}" - names = [for module in concat(local.tgm_org_modules, local.gcp_org_modules) : "ci-${module}"] + names = [for module in [for repo in local.repos : try(repo.short_name, trimprefix(repo.name, "terraform-google-"))] : "ci-${module}"] + set_roles = false + deletion_protection = false +} + +module "bpt_ci_folder" { + source = "terraform-google-modules/folders/google" + version = "~> 5.0" + + parent = "folders/${replace(local.folders["ci-projects"], "folders/", "")}" + names = ["ci-bpt"] set_roles = false } diff --git a/infra/terraform/test-org/org/github.tf b/infra/terraform/test-org/org/github.tf new file mode 100644 index 00000000000..e6219dd4433 --- /dev/null +++ b/infra/terraform/test-org/org/github.tf @@ -0,0 +1,45 @@ +/** + * Copyright 2023-2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +provider "github" {} + +locals { + owners = flatten( + [for repo, val in local.repos : [for owner in setunion(lookup(val, "admins", []), lookup(val, "maintainers", [])) : owner]] + ) + + org_members = [for login in setunion(data.github_organization.tgm.users[*].login, data.github_organization.gcp.users[*].login) : login] + + invalid_owners = setsubtract(local.owners, local.org_members) +} + +variable "temp_allow_invalid_owners" { + type = list(string) + description = "Googlers added as owners on TF blueprint repos but are not part of the GCP or TGM orgs yet." + default = [ + "nidhi0710", # remove once heynidhi@ is added to GCP org + "sylvioneto", + "erictune", + "megelatim", + ] +} + +data "github_organization" "tgm" { + name = "terraform-google-modules" +} + +data "github_organization" "gcp" { + name = "GoogleCloudPlatform" +} diff --git a/infra/terraform/test-org/org/gsuite.tf b/infra/terraform/test-org/org/gsuite.tf index f8707c9bee3..fc8e7f24066 100644 --- a/infra/terraform/test-org/org/gsuite.tf +++ b/infra/terraform/test-org/org/gsuite.tf @@ -39,8 +39,8 @@ locals { "roles/storage.admin", ] - ci_gsuite_sa_bucket = "ci-gsuite-sa-secrets" - ci_gsuite_sa_bucket_path = "gsuite-sa.json" + ci_gsuite_sa_bucket = "ci-gsuite-sa-secrets" + // ci_gsuite_sa_bucket_path = "gsuite-sa.json" } resource "google_folder" "ci_gsuite_sa_folder" { @@ -50,13 +50,13 @@ resource "google_folder" "ci_gsuite_sa_folder" { module "ci_gsuite_sa_project" { source = "terraform-google-modules/project-factory/google" - version = "~> 4.0" + version = "~> 17.0" name = "ci-gsuite-sa-project" project_id = "ci-gsuite-sa-project" org_id = local.org_id folder_id = google_folder.ci_gsuite_sa_folder.id - billing_account = local.billing_account + billing_account = local.old_billing_account labels = { cft-ci = "permanent" @@ -98,22 +98,18 @@ resource "google_folder_iam_member" "ci_gsuite_sa_folder" { member = "serviceAccount:${google_service_account.ci_gsuite_sa.email}" } -resource 
"google_billing_account_iam_member" "ci_gsuite_sa_billing" { - billing_account_id = local.billing_account - role = "roles/billing.user" - member = "serviceAccount:${google_service_account.ci_gsuite_sa.email}" -} // Generate a json key and put it into the secrets bucket. - -resource "google_service_account_key" "ci_gsuite_sa" { - service_account_id = google_service_account.ci_gsuite_sa.id -} +//TODO(bbaiju): Re enable if needed for any CI +# resource "google_service_account_key" "ci_gsuite_sa" { +# service_account_id = google_service_account.ci_gsuite_sa.id +# } resource "google_storage_bucket" "ci_gsuite_sa" { name = local.ci_gsuite_sa_bucket storage_class = "MULTI_REGIONAL" project = module.ci_gsuite_sa_project.project_id + location = "US" versioning { enabled = true @@ -122,11 +118,12 @@ resource "google_storage_bucket" "ci_gsuite_sa" { force_destroy = true } -resource "google_storage_bucket_object" "ci_gsuite_sa_json" { - name = local.ci_gsuite_sa_bucket_path - content = base64decode(google_service_account_key.ci_gsuite_sa.private_key) - bucket = google_storage_bucket.ci_gsuite_sa.name -} +//TODO(bbaiju): Re enable if needed for any CI +# resource "google_storage_bucket_object" "ci_gsuite_sa_json" { +# name = local.ci_gsuite_sa_bucket_path +# content = base64decode(google_service_account_key.ci_gsuite_sa.private_key) +# bucket = google_storage_bucket.ci_gsuite_sa.name +# } # Grant G-Suite project rights to cft_ci_group. 
# Required to be able to create new gsuite sa keys and to fetch diff --git a/infra/terraform/test-org/org/iam.tf b/infra/terraform/test-org/org/iam.tf index c065039fefd..e0644db7c82 100644 --- a/infra/terraform/test-org/org/iam.tf +++ b/infra/terraform/test-org/org/iam.tf @@ -16,8 +16,8 @@ */ module "admin_bindings" { - source = "terraform-google-modules/iam/google" - version = "~> 2.0" + source = "terraform-google-modules/iam/google//modules/folders_iam" + version = "~> 8.0" folders = [local.folders["ci-projects"]] @@ -37,8 +37,8 @@ module "admin_bindings" { } module "ci_bindings" { - source = "terraform-google-modules/iam/google" - version = "~> 2.0" + source = "terraform-google-modules/iam/google//modules/folders_iam" + version = "~> 8.0" folders = [local.folders["ci-projects"]] @@ -58,8 +58,8 @@ module "ci_bindings" { } module "ci_folders_folder_bindings" { - source = "terraform-google-modules/iam/google" - version = "~> 2.0" + source = "terraform-google-modules/iam/google//modules/folders_iam" + version = "~> 8.0" folders = [local.ci_folders["ci-folders"]] @@ -69,9 +69,3 @@ module "ci_folders_folder_bindings" { ] } } - -resource "google_billing_account_iam_member" "ci-billing-user" { - billing_account_id = local.billing_account - role = "roles/billing.admin" - member = "group:${local.cft_ci_group}" -} diff --git a/infra/terraform/test-org/org/locals.tf b/infra/terraform/test-org/org/locals.tf index fef61322840..3995e8c2e80 100644 --- a/infra/terraform/test-org/org/locals.tf +++ b/infra/terraform/test-org/org/locals.tf @@ -1,5 +1,5 @@ /** - * Copyright 2019 Google LLC + * Copyright 2019-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,11 +15,13 @@ */ locals { - org_id = "943740911108" - billing_account = "01D904-DAF6EC-F34EF7" - cft_ci_group = "cft-ci-robots@test.infra.cft.tips" - cft_dev_group = "cft-developers@dev.infra.cft.tips" - gcp_admins_group = "gcp-admins@test.infra.cft.tips" + org_id = "943740911108" + old_billing_account = "01D904-DAF6EC-F34EF7" + billing_account = "0138EF-C93849-98B0B5" + lr_billing_account = "01108A-537F1E-A5BFFC" + cft_ci_group = "cft-ci-robots@test.blueprints.joonix.net" + gcp_admins_group = "gcp-admins@test.blueprints.joonix.net" + ci_project_id = "cloud-foundation-cicd" folders = { "ci-projects" = module.folders-root.ids["ci-projects"] @@ -27,74 +29,896 @@ locals { } ci_folders = module.folders-ci.ids - ci_repos_folders = merge( - { for m in local.tgm_org_modules : m => { folder_name = "ci-${m}", folder_id = replace(module.folders-ci.ids["ci-${m}"], "folders/", ""), gh_org = "terraform-google-modules" } }, - { for m in local.gcp_org_modules : m => { folder_name = "ci-${m}", folder_id = replace(module.folders-ci.ids["ci-${m}"], "folders/", ""), gh_org = "GoogleCloudPlatform" } } - ) - tgm_org_modules = [ - "kms", - "network", - "folders", - "slo", - "sap", - "iam", - "event-function", - "kubernetes-engine", - "vpn", - "project-factory", - "pubsub", - "migrate", - "bootstrap", - "redis", - "datalab", - "mariadb", - "jenkins", - "container-vm", - "lb", - "vm", - "memorystore", - "airflow", - "service-accounts", - "cloud-storage", - "sql-db", - "vpc-service-controls", - "cloud-datastore", - "dataflow", - "cloud-nat", - "startup-scripts", - "scheduled-function", - "address", - "bigquery", - "bastion-host", - "org-policy", - "log-export", - "on-prem", - "cloud-dns", - "gsuite-export", - "secret", - "terraform-validator", - "lb-http", - "gcloud", - "lb-internal", - "utils", - "composer", - "github-actions-runners", - "healthcare", - "gke-gitlab", - "example-foundation", # Not module - "anthos-platform", # Not module - "cloud-operations", - 
"cloud-foundation-training", # Not module - "cloud-router", - "group", - ] - gcp_org_modules = [ - "example-foundation-app", # Not module - "anthos-samples", - "secure-cicd", - "secured-data-warehouse", - "cloud-run", - "network-forensics", - "blueprints", # Not module + ci_repos_folders = { + for repo in local.repos : try(repo.short_name, trimprefix(repo.name, "terraform-google-")) => { + folder_name = "ci-${try(repo.short_name, trimprefix(repo.name, "terraform-google-"))}", + folder_id = replace(module.folders-ci.ids["ci-${try(repo.short_name, trimprefix(repo.name, "terraform-google-"))}"], "folders/", ""), + gh_org = repo.org + } + } + jss_common_group = "jump-start-solutions-admins" + + adc_common_admins = ["q2w"] + + common_topics = { + hcls = "healthcare-life-sciences", + e2e = "end-to-end" + serverless = "serverless-computing", + compute = "compute" + containers = "containers", + db = "databases", + da = "data-analytics", + storage = "storage", + ops = "operations", + net = "networking", + security = "security-identity", + devtools = "developer-tools" + workspace = "workspace" + } + + /* + * repos schema + * name = "string" (required for modules) + * short_name = "string" (optional for modules, if not prefixed with 'terraform-google-') + * org = "terraform-google-modules" or "GoogleCloudPlatform" (required) + * description = "string" (required) + * maintainers = "list(string)" ["user1", "user2", "CASE SENSATIVE"] (optional) + * admins = "list(string)" ["user1", "user2", "CASE SENSATIVE"] (optional) + * groups = "list(string)" ["group1", "group1"] (optional) + * homepage_url = "string" (optional, overrides default) + * module = BOOL (optional, default is true which includes GH repo configuration) + * topics = "string1,string2,string3" (one or more of local.common_topics required if module = true) + * lint_env = "map(string)" (optional) + * disable_lint_yaml = BOOL (optional, default is true) + * enable_periodic = BOOL (optional, if enabled runs a daily 
periodic test. Defaults to false ) + * + */ + + repos = [ + { + name = "cloud-foundation-training" + org = "terraform-google-modules" + description = "" + maintainers = ["marine675"] + }, + { + name = "terraform-google-healthcare" + org = "terraform-google-modules" + description = "Handles opinionated Google Cloud Healthcare datasets and stores" + maintainers = ["yeweidaniel"] + topics = local.common_topics.hcls + }, + { + name = "terraform-google-cloud-run" + org = "GoogleCloudPlatform" + description = "Deploys apps to Cloud Run, along with option to map custom domain" + maintainers = concat(["prabhu34", "anamer", "gtsorbo"], local.adc_common_admins) + topics = "cloudrun,google-cloud-platform,terraform-modules,${local.common_topics.serverless}" + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-secured-data-warehouse" + org = "GoogleCloudPlatform" + description = "Deploys a secured BigQuery data warehouse" + maintainers = ["lanre-OG"] + topics = join(",", [local.common_topics.da, local.common_topics.e2e]) + lint_env = { + SHELLCHECK_OPTS = "-e SC2154 -e SC2171 -e SC2086" + } + }, + { + name = "terraform-google-anthos-vm" + org = "GoogleCloudPlatform" + description = "Creates VMs on Anthos Bare Metal clusters" + maintainers = ["zhuchenwang"] + topics = "anthos,kubernetes,terraform-module,vm,${local.common_topics.compute}" + }, + { + name = "terraform-google-kubernetes-engine" + org = "terraform-google-modules" + description = "Configures opinionated GKE clusters" + maintainers = ["ericyz"] + admins = ["apeabody"] + topics = join(",", [local.common_topics.compute, local.common_topics.containers]) + }, + { + name = "terraform-ecommerce-microservices-on-gke" + short_name = "ecommerce-microservices" + org = "GoogleCloudPlatform" + description = "Deploys a web-based ecommerce app into a multi-cluster Google Kubernetes Engine setup." 
+ groups = ["dee-platform-ops", local.jss_common_group] + enable_periodic = true + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-example-java-dynamic-point-of-sale" + short_name = "java-dynamic-point-of-sale" + org = "GoogleCloudPlatform" + description = "Deploys a dynamic Java webapp into a Google Kubernetes Engine cluster." + maintainers = ["Shabirmean", "Mukamik"] + groups = ["dee-platform-ops", local.jss_common_group] + lint_env = { + "EXCLUDE_HEADER_CHECK" = "\\./infra/sql-schema" + } + enable_periodic = true + }, + { + name = "terraform-example-foundation" + short_name = "example-foundation" + org = "terraform-google-modules" + description = "Shows how the CFT modules can be composed to build a secure cloud foundation" + maintainers = ["rjerrems", "gtsorbo", "eeaton", "sleighton2022"] + homepage_url = "https://cloud.google.com/architecture/security-foundations" + topics = join(",", [local.common_topics.e2e, local.common_topics.ops]) + lint_env = { + "EXCLUDE_LINT_DIRS" = "\\./3-networks/modules/transitivity/assets", + "ENABLE_PARALLEL" = "0", + "DISABLE_TFLINT" = "1" + } + }, + { + name = "terraform-google-log-analysis" + org = "GoogleCloudPlatform" + description = "Stores and analyzes log data" + maintainers = ["ryotat7"] + topics = local.common_topics.da + groups = [local.jss_common_group] + enable_periodic = true + }, + { + name = "terraform-google-three-tier-web-app" + org = "GoogleCloudPlatform" + description = "Deploys a three tier web application using Cloud Run and Cloud SQL" + maintainers = ["tpryan"] + topics = join(",", [local.common_topics.serverless, local.common_topics.db]) + groups = [local.jss_common_group] + enable_periodic = true + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-load-balanced-vms" + org = "GoogleCloudPlatform" + description = "Creates a Managed Instance Group with a loadbalancer" + maintainers = ["tpryan"] + topics = local.common_topics.net + lint_env = { + 
ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-secure-cicd" + org = "GoogleCloudPlatform" + description = "Builds a secure CI/CD pipeline on Google Cloud" + maintainers = ["gtsorbo"] + topics = join(",", [local.common_topics.security, local.common_topics.devtools, local.common_topics.e2e]) + enable_periodic = true + groups = [local.jss_common_group] + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-media-cdn-vod" + org = "GoogleCloudPlatform" + description = "Deploys Media CDN video-on-demand" + maintainers = ["roddzurcher"] + topics = local.common_topics.ops + groups = [local.jss_common_group] + enable_periodic = true + }, + { + name = "terraform-example-foundation-app" + short_name = "example-foundation-app" + org = "GoogleCloudPlatform" + description = "" + }, + { + name = "terraform-google-network-forensics" + org = "GoogleCloudPlatform" + description = "Deploys Zeek on Google Cloud" + maintainers = ["gtsorbo"] + topics = local.common_topics.net + }, + { + name = "terraform-google-secret-manager" + org = "GoogleCloudPlatform" + description = "Creates one or more Google Secret Manager secrets and manages basic permissions for them" + maintainers = local.adc_common_admins + topics = "gcp,kms,pubsub,terraform-module,${local.common_topics.security}" + }, + { + name = "terraform-google-address" + org = "terraform-google-modules" + description = "Manages Google Cloud IP addresses" + topics = local.common_topics.net + }, + { + name = "terraform-google-bastion-host" + org = "terraform-google-modules" + description = "Generates a bastion host VM compatible with OS Login and IAP Tunneling that can be used to access internal VMs" + topics = join(",", [local.common_topics.security, local.common_topics.ops, local.common_topics.devtools]) + }, + { + name = "terraform-google-bigquery" + org = "terraform-google-modules" + description = "Creates opinionated BigQuery datasets and tables" + topics = local.common_topics.da + 
maintainers = ["davenportjw", "shanecglass"] + groups = [local.jss_common_group] + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-bootstrap" + org = "terraform-google-modules" + description = "Bootstraps Terraform usage and related CI/CD in a new Google Cloud organization" + topics = join(",", [local.common_topics.ops, local.common_topics.devtools]) + maintainers = ["josephdt12"] + }, + { + name = "terraform-google-cloud-datastore" + org = "terraform-google-modules" + description = "Manages Datastore" + topics = local.common_topics.db + }, + { + name = "terraform-google-cloud-dns" + org = "terraform-google-modules" + description = "Creates and manages Cloud DNS public or private zones and their records" + topics = local.common_topics.net + maintainers = ["imrannayer"] + }, + { + name = "terraform-google-cloud-nat" + org = "terraform-google-modules" + description = "Creates and configures Cloud NAT" + topics = local.common_topics.net + maintainers = ["imrannayer"] + }, + { + name = "terraform-google-cloud-operations" + org = "terraform-google-modules" + description = "Manages Cloud Logging and Cloud Monitoring" + topics = local.common_topics.ops + maintainers = ["imrannayer"] + groups = ["stackdriver-committers"] + }, + { + name = "terraform-google-cloud-router" + org = "terraform-google-modules" + description = "Manages a Cloud Router on Google Cloud" + topics = local.common_topics.net + maintainers = ["imrannayer"] + }, + { + name = "terraform-google-cloud-storage" + org = "terraform-google-modules" + description = "Creates one or more Cloud Storage buckets and assigns basic permissions on them to arbitrary users" + topics = local.common_topics.storage + maintainers = local.adc_common_admins + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-composer" + org = "terraform-google-modules" + description = "Manages Cloud Composer v1 and v2 along with option to manage networking" + topics = join(",", 
[local.common_topics.da, local.common_topics.ops]) + maintainers = ["imrannayer"] + }, + { + name = "terraform-google-container-vm" + org = "terraform-google-modules" + description = "Deploys containers on Compute Engine instances" + topics = join(",", [local.common_topics.containers, local.common_topics.compute]) + }, + { + name = "terraform-google-data-fusion" + org = "terraform-google-modules" + description = "Manages Cloud Data Fusion" + topics = local.common_topics.da + }, + { + name = "terraform-google-dataflow" + org = "terraform-google-modules" + description = "Handles opinionated Dataflow job configuration and deployments" + topics = local.common_topics.da + }, + { + name = "terraform-google-datalab" + org = "terraform-google-modules" + description = "Creates DataLab instances with support for GPU instances" + topics = local.common_topics.da + }, + { + name = "terraform-google-event-function" + org = "terraform-google-modules" + description = "Responds to logging events with a Cloud Function" + topics = local.common_topics.serverless + }, + { + name = "terraform-google-folders" + org = "terraform-google-modules" + description = "Creates several Google Cloud folders under the same parent" + topics = local.common_topics.devtools + }, + { + name = "terraform-google-gcloud" + org = "terraform-google-modules" + description = "Executes Google Cloud CLI commands within Terraform" + topics = local.common_topics.devtools + lint_env = { "EXCLUDE_LINT_DIRS" = "\\./cache" } + }, + { + name = "terraform-google-github-actions-runners" + org = "terraform-google-modules" + description = "Creates self-hosted GitHub Actions Runners on Google Cloud" + topics = local.common_topics.devtools + maintainers = ["gtsorbo"] + }, + { + name = "terraform-google-gke-gitlab" + org = "terraform-google-modules" + description = "Installs GitLab on Kubernetes Engine" + topics = local.common_topics.devtools + }, + { + name = "terraform-google-group" + org = "terraform-google-modules" + 
description = "Manages Google Groups" + topics = local.common_topics.workspace + }, + { + name = "terraform-google-gsuite-export" + org = "terraform-google-modules" + description = "Creates a Compute Engine VM instance and sets up a cronjob to export GSuite Admin SDK data to Cloud Logging on a schedule" + topics = join(",", [local.common_topics.ops, local.common_topics.workspace]) + }, + { + name = "terraform-google-iam" + org = "terraform-google-modules" + description = "Manages multiple IAM roles for resources on Google Cloud" + topics = local.common_topics.security + maintainers = ["imrannayer"] + }, + { + name = "terraform-google-jenkins" + org = "terraform-google-modules" + description = "Creates a Compute Engine instance running Jenkins" + topics = local.common_topics.devtools + }, + { + name = "terraform-google-kms" + org = "terraform-google-modules" + description = "Allows managing a keyring, zero or more keys in the keyring, and IAM role bindings on individual keys" + topics = local.common_topics.security + }, + { + name = "terraform-google-lb" + org = "terraform-google-modules" + description = "Creates a regional TCP proxy load balancer for Compute Engine by using target pools and forwarding rules" + topics = local.common_topics.net + maintainers = ["imrannayer"] + }, + { + name = "terraform-google-lb-http" + org = "terraform-google-modules" + description = "Creates a global HTTP load balancer for Compute Engine by using forwarding rules" + topics = local.common_topics.net + maintainers = concat(["imrannayer"], local.adc_common_admins) + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-lb-internal" + org = "terraform-google-modules" + description = "Creates an internal load balancer for Compute Engine by using forwarding rules" + topics = local.common_topics.net + maintainers = ["imrannayer"] + }, + { + name = "terraform-google-log-export" + org = "terraform-google-modules" + description = "Creates log exports at the project, 
folder, or organization level" + topics = local.common_topics.ops + maintainers = ["imrannayer"] + }, + { + name = "terraform-google-memorystore" + org = "terraform-google-modules" + description = "Creates a fully functional Google Memorystore (redis) instance" + topics = local.common_topics.db + maintainers = concat(["imrannayer"], local.adc_common_admins) + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-module-template" + org = "terraform-google-modules" + description = "Provides a template for creating a Cloud Foundation Toolkit Terraform module" + disable_lint_yaml = true + }, + { + name = "terraform-google-network" + org = "terraform-google-modules" + description = "Sets up a new VPC network on Google Cloud" + topics = local.common_topics.net + maintainers = ["imrannayer"] + }, + { + name = "terraform-google-org-policy" + org = "terraform-google-modules" + description = "Manages Google Cloud organization policies" + topics = local.common_topics.security + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-project-factory" + org = "terraform-google-modules" + description = "Creates an opinionated Google Cloud project by using Shared VPC, IAM, and Google Cloud APIs" + topics = local.common_topics.ops + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-pubsub" + org = "terraform-google-modules" + description = "Creates Pub/Sub topic and subscriptions associated with the topic" + topics = local.common_topics.da + maintainers = ["imrannayer"] + }, + { + name = "terraform-google-sap" + org = "terraform-google-modules" + description = "Deploys SAP products" + topics = local.common_topics.compute + maintainers = ["sjswerdlow", "megelatim"] + }, + { + name = "terraform-google-scheduled-function" + org = "terraform-google-modules" + description = "Sets up a scheduled job to trigger events and run functions" + topics = local.common_topics.serverless + }, + { + name = 
"terraform-google-service-accounts" + org = "terraform-google-modules" + description = "Creates one or more service accounts and grants them basic roles" + maintainers = local.adc_common_admins + topics = local.common_topics.security + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-slo" + org = "terraform-google-modules" + description = "Creates SLOs on Google Cloud from custom Stackdriver metrics capability to export SLOs to Google Cloud services and other systems" + topics = local.common_topics.ops + }, + { + name = "terraform-google-sql-db" + org = "terraform-google-modules" + description = "Creates a Cloud SQL database instance" + topics = local.common_topics.db + maintainers = concat(["isaurabhuttam", "imrannayer"], local.adc_common_admins) + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-startup-scripts" + org = "terraform-google-modules" + description = "Provides a library of useful startup scripts to embed in VMs" + topics = local.common_topics.compute + }, + { + name = "terraform-google-utils" + org = "terraform-google-modules" + description = "Gets the short names for a given Google Cloud region" + topics = local.common_topics.ops + }, + { + name = "terraform-google-vault" + org = "terraform-google-modules" + description = "Deploys Vault on Compute Engine" + topics = "hashicorp-vault,${local.common_topics.ops},${local.common_topics.devtools},${local.common_topics.security}" + }, + { + name = "terraform-google-vm" + org = "terraform-google-modules" + description = "Provisions VMs in Google Cloud" + maintainers = concat(["erlanderlo"], local.adc_common_admins) + topics = local.common_topics.compute + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-vpc-service-controls" + org = "terraform-google-modules" + description = "Handles opinionated VPC Service Controls and Access Context Manager configuration and deployments" + topics = local.common_topics.net + 
maintainers = ["imrannayer"] + }, + { + name = "terraform-google-vpn" + org = "terraform-google-modules" + description = "Sets up a Cloud VPN gateway" + topics = local.common_topics.net + maintainers = ["imrannayer"] + }, + { + short_name = "anthos-platform" + org = "terraform-google-modules" + module = false + }, + { + short_name = "anthos-samples" + org = "GoogleCloudPlatform" + module = false + }, + { + short_name = "blueprints" + org = "GoogleCloudPlatform" + module = false + }, + { + short_name = "docs-samples" + org = "terraform-google-modules" + module = false + enable_periodic = true + }, + { + short_name = "migrate" + org = "terraform-google-modules" + module = false + }, + { + short_name = "policy-blueprints" + org = "GoogleCloudPlatform" + module = false + }, + { + short_name = "terraform-validator" + org = "terraform-google-modules" + module = false + }, + { + name = "terraform-google-waap" + org = "GoogleCloudPlatform" + description = "Deploys the WAAP solution on Google Cloud" + maintainers = ["gtsorbo"] + topics = local.common_topics.ops + }, + { + name = "terraform-google-cloud-workflows" + org = "GoogleCloudPlatform" + description = "Manage Workflows with optional Scheduler or Event Arc triggers" + maintainers = ["anaik91"] + topics = join(",", [local.common_topics.serverless, local.common_topics.devtools]) + }, + { + name = "terraform-google-vertex-ai" + org = "GoogleCloudPlatform" + description = "Deploy Vertex AI resources" + maintainers = ["imrannayer"] + topics = join(",", [local.common_topics.compute]) + }, + { + name = "terraform-google-cloud-armor" + org = "GoogleCloudPlatform" + description = "Deploy Cloud Armor security policy" + maintainers = ["imrannayer"] + topics = join(",", [local.common_topics.compute, local.common_topics.net]) + }, + { + name = "terraform-google-pam" + org = "GoogleCloudPlatform" + description = "Deploy Privileged Access Manager" + maintainers = ["imrannayer", "mgaur10"] + topics = local.common_topics.security + }, 
+ { + name = "terraform-google-netapp-volumes" + org = "GoogleCloudPlatform" + description = "Deploy NetApp Storage Volumes" + maintainers = ["imrannayer"] + topics = join(",", [local.common_topics.compute, local.common_topics.net]) + }, + { + name = "terraform-google-cloud-deploy" + org = "GoogleCloudPlatform" + description = "Create Cloud Deploy pipelines and targets" + maintainers = ["gtsorbo", "niranjankl"] + topics = join(",", [local.common_topics.devtools]) + }, + { + name = "terraform-google-cloud-functions" + org = "GoogleCloudPlatform" + description = "Deploys Cloud Functions (Gen 2)" + maintainers = ["prabhu34", "gtsorbo"] + topics = "cloudfunctions,functions,google-cloud-platform,terraform-modules,${local.common_topics.serverless}" + }, + { + name = "terraform-dynamic-python-webapp" + short_name = "dynamic-python-webapp" + org = "GoogleCloudPlatform" + description = "Deploy a dynamic python webapp" + maintainers = ["glasnt", "donmccasland"] + homepage_url = "avocano.dev" + groups = [local.jss_common_group, "team-egg"] + enable_periodic = true + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-dynamic-javascript-webapp" + short_name = "dynamic-javascript-webapp" + org = "GoogleCloudPlatform" + description = "Deploy a dynamic javascript webapp" + maintainers = ["LukeSchlangen", "donmccasland"] + homepage_url = "avocano.dev" + groups = [local.jss_common_group, "team-egg", "developer-journey-app-approvers"] + enable_periodic = true + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-example-deploy-java-multizone" + short_name = "deploy-java-multizone" + org = "GoogleCloudPlatform" + description = "Deploy a multizone Java application" + maintainers = ["donmccasland"] + groups = [local.jss_common_group] + enable_periodic = false + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-itar-architectures" + org = "GoogleCloudPlatform" + description = "Includes use cases for deploying 
ITAR-aligned architectures on Google Cloud" + maintainers = ["gtsorbo"] + topics = join(",", [local.common_topics.compute], ["compliance"]) + }, + { + name = "terraform-google-analytics-lakehouse" + org = "GoogleCloudPlatform" + description = "Deploys a Lakehouse Architecture Solution" + maintainers = ["davenportjw", "bradmiro"] + topics = local.common_topics.da + groups = [local.jss_common_group] + enable_periodic = true + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-alloy-db" + org = "GoogleCloudPlatform" + description = "Creates an Alloy DB instance" + maintainers = ["anaik91", "imrannayer"] + topics = local.common_topics.db + }, + { + name = "terraform-google-cloud-ids" + org = "GoogleCloudPlatform" + description = "Deploys a Cloud IDS instance and associated resources." + maintainers = ["gtsorbo", "mgaur10"] + topics = join(",", [local.common_topics.security, local.common_topics.net]) + }, + { + name = "terraform-example-deploy-java-gke" + short_name = "deploy-java-gke" + org = "GoogleCloudPlatform" + description = "Deploy a Legacy Java App GKE" + groups = ["dee-platform-ops", local.jss_common_group] + enable_periodic = true + lint_env = { + ENABLE_BPMETADATA = "1" + } + + }, + { + name = "terraform-google-crmint" + org = "GoogleCloudPlatform" + description = "Deploy the marketing analytics application, CRMint" + maintainers = ["dulacp"] + topics = join(",", [local.common_topics.da, local.common_topics.e2e], ["marketing"]) + }, + { + name = "terraform-ml-image-annotation-gcf" + short_name = "ml-image-annotation-gcf" + org = "GoogleCloudPlatform" + description = "Deploys an app for ml image annotation using gcf" + maintainers = ["xsxm", "ivanmkc", "balajismaniam", "donmccasland"] + groups = ["dee-data-ai", local.jss_common_group] + enable_periodic = true + }, + { + name = "terraform-google-out-of-band-security" + org = "GoogleCloudPlatform" + description = "Creates a 3P out-of-band security appliance deployment" + maintainers = 
["Saipriyavk", "ChrisBarefoot"] + topics = local.common_topics.net + }, + { + name = "notebooks-blueprint-security" + short_name = "secured-notebook" + org = "GoogleCloudPlatform" + description = "Opinionated setup for securely using AI Platform Notebooks." + maintainers = ["gtsorbo", "erlanderlo"] + topics = join(",", [local.common_topics.da, local.common_topics.security]) + }, + { + name = "terraform-genai-doc-summarization" + short_name = "genai-doc-summarization" + org = "GoogleCloudPlatform" + description = "Summarizes document using OCR and Vertex Generative AI LLM" + maintainers = ["asrivas", "davidcavazos"] + groups = [local.jss_common_group] + enable_periodic = true + }, + { + name = "terraform-genai-knowledge-base" + short_name = "genai-knowledge-base" + org = "GoogleCloudPlatform" + description = "Fine tune an LLM model to answer questions from your documents." + maintainers = ["davidcavazos"] + groups = [local.jss_common_group] + enable_periodic = true + }, + { + name = "terraform-google-secured-data-warehouse-onprem-ingest" + short_name = "sdw-onprem-ingest" + org = "GoogleCloudPlatform" + description = "Deploys a secured data warehouse variant for ingesting encrypted data from on-prem sources" + maintainers = ["lanre-OG"] + topics = join(",", [local.common_topics.da, local.common_topics.security, local.common_topics.e2e]) + }, + { + name = "terraform-google-tf-cloud-agents" + org = "GoogleCloudPlatform" + description = "Creates self-hosted Terraform Cloud Agent on Google Cloud" + topics = join(",", [local.common_topics.ops, local.common_topics.security, local.common_topics.devtools]) + }, + { + name = "terraform-google-cloud-spanner" + org = "GoogleCloudPlatform" + description = "Deploy Spanner instances" + maintainers = ["anaik91", "imrannayer"] + topics = local.common_topics.db + }, + { + name = "terraform-pubsub-integration-golang" + org = "GoogleCloudPlatform" + short_name = "pubsub-golang-app" + maintainers = ["Shabirmean", "Mukamik"] + groups = 
["dee-platform-ops", local.jss_common_group] + enable_periodic = true + }, + { + name = "terraform-pubsub-integration-java" + org = "GoogleCloudPlatform" + short_name = "pubsub-java-app" + maintainers = ["Shabirmean", "Mukamik"] + groups = ["dee-platform-ops", local.jss_common_group] + enable_periodic = true + }, + { + name = "terraform-google-backup-dr" + org = "GoogleCloudPlatform" + short_name = "backup-dr" + description = "Deploy Backup and DR appliances" + maintainers = ["umeshkumhar"] + topics = join(",", [local.common_topics.compute, local.common_topics.ops]) + }, + { + name = "terraform-google-tags" + org = "GoogleCloudPlatform" + description = "Create and manage Google Cloud Tags" + maintainers = ["nidhi0710"] + topics = join(",", [local.common_topics.security, local.common_topics.ops]) + }, + { + name = "terraform-google-dataplex-auto-data-quality" + org = "GoogleCloudPlatform" + description = "Move data between environments using Dataplex" + maintainers = ["bradmiro"] + topics = local.common_topics.da + }, + { + name = "terraform-google-enterprise-application" + org = "GoogleCloudPlatform" + description = "Deploy an enterprise developer platform on Google Cloud" + maintainers = ["gtsorbo", "erictune", "yliaog", "sleighton2022", "apeabody"] + topics = join(",", [local.common_topics.e2e, local.common_topics.ops]) + }, + { + name = "terraform-genai-rag" + short_name = "genai-rag" + org = "GoogleCloudPlatform" + description = "Deploys a Generative AI RAG solution" + maintainers = ["davenportjw", "bradmiro"] + groups = ["dee-platform-ops", "dee-data-ai", local.jss_common_group] + enable_periodic = true + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-google-artifact-registry" + org = "GoogleCloudPlatform" + description = "Create and manage Artifact Registry repositories" + maintainers = ["prabhu34"] + topics = join(",", [local.common_topics.containers, local.common_topics.devtools]) + }, + { + name = "terraform-google-bigtable" + org 
= "GoogleCloudPlatform" + description = "Create and manage Google Bigtable resources" + maintainers = ["hariprabhaam"] + topics = local.common_topics.da + }, + { + name = "terraform-google-secure-web-proxy" + org = "GoogleCloudPlatform" + description = "Create and manage Secure Web Proxy on GCP for secured egress web traffic" + maintainers = ["maitreya-source"] + topics = join(",", [local.common_topics.security, local.common_topics.net]) + }, + { + name = "terraform-cloud-client-api" + short_name = "cloud-client-api" + org = "GoogleCloudPlatform" + description = "Deploys an example application that uses Cloud Client APIs" + maintainers = ["glasnt", "iennae"] + groups = ["team-egg", local.jss_common_group] + enable_periodic = true + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "kms-solutions" + org = "GoogleCloudPlatform" + description = "Store Cloud KMS scripts, artifacts, code samples, and more." + maintainers = ["tdbhacks", "erlanderlo", "g-swap", "nb-goog"] + lint_env = { + ENABLE_BPMETADATA = "1" + } + }, + { + name = "terraform-dataanalytics-eventdriven" + short_name = "dataanalytics-eventdriven" + org = "GoogleCloudPlatform" + description = "Uses click-to-deploy to demonstrate how to load data from Cloud Storage to BigQuery using an event-driven load function." 
+ groups = [local.jss_common_group] + maintainers = ["fellipeamedeiros", "sylvioneto"] + }, + { + name = "terraform-google-apphub" + org = "GoogleCloudPlatform" + description = "Creates and manages AppHub resources" + maintainers = ["q2w"] + admins = ["bharathkkb"] + }, ] } diff --git a/infra/terraform/test-org/org/outputs.tf b/infra/terraform/test-org/org/outputs.tf index 22c63f54a6f..1e3a81f6d60 100644 --- a/infra/terraform/test-org/org/outputs.tf +++ b/infra/terraform/test-org/org/outputs.tf @@ -1,5 +1,5 @@ /** - * Copyright 2019 Google LLC + * Copyright 2019-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -30,6 +30,10 @@ output "billing_account" { value = local.billing_account } +output "lr_billing_account" { + value = local.lr_billing_account +} + output "cft_ci_group" { value = local.cft_ci_group } @@ -50,18 +54,19 @@ output "ci_gsuite_sa_project_id" { value = module.ci_gsuite_sa_project.project_id } -output "ci_gsuite_sa_key" { - value = google_service_account_key.ci_gsuite_sa.private_key - sensitive = true -} -output "ci_gsuite_sa_bucket" { - value = google_storage_bucket.ci_gsuite_sa.name -} +# output "ci_gsuite_sa_key" { +# value = google_service_account_key.ci_gsuite_sa.private_key +# sensitive = true +# } -output "ci_gsuite_sa_bucket_path" { - value = google_storage_bucket_object.ci_gsuite_sa_json.name -} +# output "ci_gsuite_sa_bucket" { +# value = google_storage_bucket.ci_gsuite_sa.name +# } + +# output "ci_gsuite_sa_bucket_path" { +# value = google_storage_bucket_object.ci_gsuite_sa_json.name +# } output "ci_bq_external_data_folder_id" { value = google_folder.ci_bq_external_data_folder.id @@ -90,3 +95,25 @@ output "ci_bq_external_hive_file_bar" { output "prow_int_sa" { value = module.prow-int-sa-wi.gcp_service_account_email } + +output "ci_media_cdn_vod_project_id" { + value = module.ci_media_cdn_vod_project.project_id +} + +output "modules" { + 
value = [for value in local.repos : value if try(value.module, true)] + + precondition { + condition = length(setsubtract(local.invalid_owners, var.temp_allow_invalid_owners)) == 0 + error_message = "Provided Repo Owners are not currently members of GCP or TGM Orgs: ${join(", ", setsubtract(local.invalid_owners, var.temp_allow_invalid_owners))}. You can bypass this error by setting `-var='temp_allow_invalid_owners=[\"${join("\",\"", local.invalid_owners)}\"]'` when running plan/apply." + } + +} + +output "bpt_folder" { + value = module.bpt_ci_folder.id +} + +output "periodic_repos" { + value = sort([for value in local.repos : coalesce(try(value.name, null), try(value.short_name, null)) if try(value.enable_periodic, false)]) +} diff --git a/infra/terraform/test-org/org/policy.tf b/infra/terraform/test-org/org/policy.tf index 76c039f69ea..33ec5ca3eb6 100644 --- a/infra/terraform/test-org/org/policy.tf +++ b/infra/terraform/test-org/org/policy.tf @@ -14,7 +14,14 @@ * limitations under the License. */ +provider "google" { + user_project_override = true + billing_project = local.ci_project_id + alias = "override" +} + resource "google_access_context_manager_access_policy" "access_policy" { - parent = "organizations/${local.org_id}" - title = "default policy" + provider = google.override + parent = "organizations/${local.org_id}" + title = "default policy" } diff --git a/infra/terraform/test-org/org/projects.tf b/infra/terraform/test-org/org/projects.tf new file mode 100644 index 00000000000..64f764aad14 --- /dev/null +++ b/infra/terraform/test-org/org/projects.tf @@ -0,0 +1,36 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "ci_media_cdn_vod_project" { + source = "terraform-google-modules/project-factory/google" + version = "~> 17.0" + + name = "ci-media-cdn-vod-project" + project_id = "ci-media-cdn-vod-project" + org_id = local.org_id + folder_id = module.folders-ci.ids["ci-media-cdn-vod"] + billing_account = local.old_billing_account + + labels = { + cft-ci = "permanent" + } + + activate_apis = [ + "cloudresourcemanager.googleapis.com", + "compute.googleapis.com", + "iam.googleapis.com", + ] +} diff --git a/infra/terraform/test-org/org/prow.tf b/infra/terraform/test-org/org/prow.tf index bf419712219..515b2ad0696 100644 --- a/infra/terraform/test-org/org/prow.tf +++ b/infra/terraform/test-org/org/prow.tf @@ -36,7 +36,7 @@ provider "kubernetes" { module "prow-int-sa-wi" { source = "terraform-google-modules/kubernetes-engine/google//modules/workload-identity" - version = "~> 16.0" + version = "~> 33.0" name = "int-test-sa" namespace = local.test_ns project_id = local.prow_project_id diff --git a/infra/terraform/test-org/org/versions.tf b/infra/terraform/test-org/org/versions.tf index c755d53d5b6..98cf4acd4e3 100644 --- a/infra/terraform/test-org/org/versions.tf +++ b/infra/terraform/test-org/org/versions.tf @@ -1,5 +1,5 @@ /** - * Copyright 2019 Google LLC + * Copyright 2019-2023 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,26 +15,35 @@ */ terraform { - required_version = ">= 0.12" + required_version = ">= 1.4.4" + required_providers { + external = { + source = "hashicorp/external" + version = ">= 1.2, < 3" + } + google = { + source = "hashicorp/google" + version = ">= 3.19, < 7" + } + google-beta = { + source = "hashicorp/google-beta" + version = ">= 3.19, < 7" + } + null = { + source = "hashicorp/null" + version = ">= 2.1, < 4" + } + random = { + source = "hashicorp/random" + version = ">= 2.3.1, < 4" + } + github = { + source = "integrations/github" + version = "~> 6.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.13, < 3" + } + } } - -provider "external" { - version = "~> 1.2" -} - -provider "google" { - version = "~> 3.19" -} - -provider "google-beta" { - version = "~> 3.19" -} - -provider "null" { - version = "~> 2.1" -} - -provider "random" { - version = "~> 2.2" -} - diff --git a/infra/terraform/test-org/test-cleanup/.terraform.lock.hcl b/infra/terraform/test-org/test-cleanup/.terraform.lock.hcl new file mode 100644 index 00000000000..2ada4474b06 --- /dev/null +++ b/infra/terraform/test-org/test-cleanup/.terraform.lock.hcl @@ -0,0 +1,122 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/archive" { + version = "2.6.0" + constraints = ">= 1.2.0, >= 1.3.0, < 3.0.0" + hashes = [ + "h1:rYAubRk7UHC/fzYqFV/VHc+7VIY01ugCxauyTYCNf9E=", + "zh:29273484f7423b7c5b3f5df34ccfc53e52bb5e3d7f46a81b65908e7a8fd69072", + "zh:3cba58ec3aea5f301caf2acc31e184c55d994cc648126cac39c63ae509a14179", + "zh:55170cd17dbfdea842852c6ae2416d057fec631ba49f3bb6466a7268cd39130e", + "zh:7197db402ba35631930c3a4814520f0ebe980ae3acb7f8b5a6f70ec90dc4a388", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:8bf7fe0915d7fb152a3a6b9162614d2ec82749a06dba13fab3f98d33c020ec4f", + "zh:8ce811844fd53adb0dabc9a541f8cb43aacfa7d8e39324e4bd3592b3428f5bfb", + "zh:bca795bca815b8ac90e3054c0a9ab1ccfb16eedbb3418f8ad473fc5ad6bf0ef7", + "zh:d9355a18df5a36cf19580748b23249de2eb445c231c36a353709f8f40a6c8432", + "zh:dc32cc32cfd8abf8752d34f2a783de0d3f7200c573b885ecb64ece5acea173b4", + "zh:ef498e20391bf7a280d0fd6fd6675621c85fbe4e92f0f517ae4394747db89bde", + "zh:f2bc5226c765b0c8055a7b6207d0fe1eb9484e3ec8880649d158827ac6ed3b22", + ] +} + +provider "registry.terraform.io/hashicorp/google" { + version = "6.6.0" + constraints = ">= 3.38.0, >= 3.43.0, >= 3.53.0, >= 4.23.0, >= 4.28.0, >= 5.31.0, >= 5.41.0, < 7.0.0" + hashes = [ + "h1:mllWOZFO8u2kD2kRTdDDAa8Jt+vb8Uxhf6C0lwLxoz8=", + "zh:0c181f9b9f0ab81731e5c4c2d20b6d342244506687437dad94e279ef2a588f68", + "zh:12a4c333fc0ba670e87f09eb574e4b7da90381f9929ef7c866048b6841cc8a6a", + "zh:15c277c2052df89429051350df4bccabe4cf46068433d4d8c673820d9756fc00", + "zh:35d1663c81b81cd98d768fa7b80874b48c51b27c036a3c598a597f653374d3c8", + "zh:56b268389758d544722a342da4174c486a40ffa2a49b45a06111fe31c6c9c867", + "zh:abd3ea8c3a62928ba09ba7eb42b52f53e682bd65e92d573f1739596b5a9a67b1", + "zh:be55a328d61d9db58690db74ed43614111e1105e5e52cee15acaa062df4e233e", + "zh:ce2317ce9fd02cf14323f9e061c43a415b4ae9b3f96046460d0e6b6529a5aa6c", + "zh:d54a6d8e031c824f1de21b93c3e01ed7fec134b4ae55223d08868c6168c98e47", + 
"zh:d8c6e33b5467c6eb5a970adb251c4c8194af12db5388cff9d4b250294eae4daa", + "zh:f49e4cc9c0b55b3bec7da64dd698298345634a5df372228ee12aa45e57982f64", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/google-beta" { + version = "6.6.0" + constraints = ">= 3.38.0, >= 3.43.0, >= 4.11.0, >= 5.41.0, < 7.0.0" + hashes = [ + "h1:B4Wrkju7TTLWlCIDh+Vh4knFQS3wmFm0NHICGjCNO3k=", + "zh:1bf8f840a9a4ac1e120a6155225a0defbfa7f07b19c9bb37b45f95006b020ccf", + "zh:39077f037e611bdbd6af42e51b2881ea03d62ad55f21b42f90dc09e2cf812753", + "zh:64313dd2158749e3a4f2759edb896a9efa2c2afc59feb38d3af57e31eaa64480", + "zh:6bec8b21a20f50d81ca2e633cdaf1144bb8615a1dedf50e87c86f4eda3467b05", + "zh:74566c568410997fe966ef44130d19d640dbb427ffec3de93f0fd2affeb6fd8f", + "zh:8fe1c42d3229d0fe64961b7fa480689408eff3e5be62eb108d6aa9d36a10a769", + "zh:9593b59efd271623f45d133164eae16676130439727a625c10d3b929d2f28671", + "zh:a72c71431523d1f0d0d8baf7141ff16aa5938c0edf27e05dc4d1dc3455a50d01", + "zh:cbc96a215575d94601ec315a2db8802b521e904aaecf2602bce0110786cfa81f", + "zh:e71c3e06e861d5c9d1782b0bbaef93b5b9defa926304f90ddd22bc9b69ee14bd", + "zh:f559eefcc67d771ce0157e7ec021c1025b8af2a8c7ca89e1f5ac812e16bf9760", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/null" { + version = "3.2.3" + constraints = ">= 2.1.0, < 4.0.0" + hashes = [ + "h1:+AnORRgFbRO6qqcfaQyeX80W0eX3VmjadjnUFUJTiXo=", + "zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2", + "zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d", + "zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3", + "zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f", + "zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + 
"zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301", + "zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670", + "zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed", + "zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65", + "zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd", + "zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.6.3" + constraints = ">= 2.1.0, >= 2.2.0, < 4.0.0" + hashes = [ + "h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=", + "zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451", + "zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8", + "zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe", + "zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1", + "zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36", + "zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30", + "zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615", + "zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad", + "zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556", + "zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0", + ] +} + +provider "registry.terraform.io/hashicorp/time" { + version = "0.12.1" + constraints = ">= 0.5.0" + hashes = [ + "h1:6BhxSYBJdBBKyuqatOGkuPKVenfx6UmLdiI13Pb3his=", + "zh:090023137df8effe8804e81c65f636dadf8f9d35b79c3afff282d39367ba44b2", + "zh:26f1e458358ba55f6558613f1427dcfa6ae2be5119b722d0b3adb27cd001efea", + "zh:272ccc73a03384b72b964918c7afeb22c2e6be22460d92b150aaf28f29a7d511", + "zh:438b8c74f5ed62fe921bd1078abe628a6675e44912933100ea4fa26863e340e9", + 
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:85c8bd8eefc4afc33445de2ee7fbf33a7807bc34eb3734b8eefa4e98e4cddf38", + "zh:98bbe309c9ff5b2352de6a047e0ec6c7e3764b4ed3dfd370839c4be2fbfff869", + "zh:9c7bf8c56da1b124e0e2f3210a1915e778bab2be924481af684695b52672891e", + "zh:d2200f7f6ab8ecb8373cda796b864ad4867f5c255cff9d3b032f666e4c78f625", + "zh:d8c7926feaddfdc08d5ebb41b03445166df8c125417b28d64712dccd9feef136", + "zh:e2412a192fc340c61b373d6c20c9d805d7d3dee6c720c34db23c2a8ff0abd71b", + "zh:e6ac6bba391afe728a099df344dbd6481425b06d61697522017b8f7a59957d44", + ] +} diff --git a/infra/terraform/test-org/test-cleanup/cleanup.tf b/infra/terraform/test-org/test-cleanup/cleanup.tf index 5996ae304ac..bce9f9ae44b 100644 --- a/infra/terraform/test-org/test-cleanup/cleanup.tf +++ b/infra/terraform/test-org/test-cleanup/cleanup.tf @@ -1,5 +1,5 @@ /** - * Copyright 2019 Google LLC + * Copyright 2019-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,20 +16,32 @@ module "scheduler-app-engine" { source = "terraform-google-modules/project-factory/google//modules/app_engine" - version = "~> 10.0" + version = "~> 17.0" location_id = local.app_location project_id = module.cft-manager-project.project_id } module "projects_cleaner" { source = "terraform-google-modules/scheduled-function/google//modules/project_cleanup" - version = "~> 2.0" + version = "~> 6.0" - job_schedule = "17 * * * *" - max_project_age_in_hours = "6" - organization_id = local.org_id - project_id = module.cft-manager-project.project_id - region = local.region - target_excluded_labels = local.exclude_labels - target_folder_id = local.cleanup_folder + job_schedule = "17 * * * *" + max_project_age_in_hours = "6" + organization_id = local.org_id + project_id = module.cft-manager-project.project_id + region = local.region + target_excluded_labels = local.exclude_labels + target_folder_id = local.cleanup_folder + clean_up_org_level_tag_keys = true + + clean_up_org_level_cai_feeds = true + target_included_feeds = [".*/feeds/fd-cai-monitoring-.*"] + + clean_up_org_level_scc_notifications = true + target_included_scc_notifications = [".*/notificationConfigs/scc-notify-.*"] + + clean_up_billing_sinks = true + target_billing_sinks = [".*/sinks/sk-c-logging-.*-billing-.*"] + billing_account = local.billing_account + function_docker_registry = "ARTIFACT_REGISTRY" } diff --git a/infra/terraform/test-org/test-cleanup/locals.tf b/infra/terraform/test-org/test-cleanup/locals.tf index 8b97c3fb522..3e7055b3053 100644 --- a/infra/terraform/test-org/test-cleanup/locals.tf +++ b/infra/terraform/test-org/test-cleanup/locals.tf @@ -23,4 +23,3 @@ locals { region = "us-central1" app_location = "us-central" } - diff --git a/infra/terraform/test-org/test-cleanup/outputs.tf b/infra/terraform/test-org/test-cleanup/outputs.tf index 896737a3902..66e8d8183e0 100644 --- a/infra/terraform/test-org/test-cleanup/outputs.tf +++ b/infra/terraform/test-org/test-cleanup/outputs.tf 
@@ -21,4 +21,3 @@ output "project_id" { output "excluded_labels" { value = local.exclude_labels } - diff --git a/infra/terraform/test-org/test-cleanup/project.tf b/infra/terraform/test-org/test-cleanup/project.tf index ce4372682e4..e81738d508b 100644 --- a/infra/terraform/test-org/test-cleanup/project.tf +++ b/infra/terraform/test-org/test-cleanup/project.tf @@ -16,7 +16,7 @@ module "cft-manager-project" { source = "terraform-google-modules/project-factory/google" - version = "~> 10.0" + version = "~> 17.0" name = "cft-project-manager" random_project_id = true @@ -34,6 +34,8 @@ module "cft-manager-project" { "storage-api.googleapis.com", "serviceusage.googleapis.com", "storage-component.googleapis.com", - "appengine.googleapis.com" + "appengine.googleapis.com", + "securitycenter.googleapis.com", + "cloudasset.googleapis.com" ] } diff --git a/infra/terraform/test-org/test-cleanup/versions.tf b/infra/terraform/test-org/test-cleanup/versions.tf index 085d8abc747..ad3410585ba 100644 --- a/infra/terraform/test-org/test-cleanup/versions.tf +++ b/infra/terraform/test-org/test-cleanup/versions.tf @@ -15,25 +15,27 @@ */ terraform { - required_version = ">= 0.12" -} - -provider "archive" { - version = "~> 1.3" -} - -provider "google" { - version = "~> 3.38" -} - -provider "google-beta" { - version = "~> 3.38" -} - -provider "null" { - version = "~> 2.1" -} - -provider "random" { - version = "~> 2.2" + required_version = ">= 1.4.4" + required_providers { + archive = { + source = "hashicorp/archive" + version = ">= 1.3, < 3" + } + google = { + source = "hashicorp/google" + version = ">= 3.38, < 7" + } + google-beta = { + source = "hashicorp/google-beta" + version = ">= 3.38, < 7" + } + null = { + source = "hashicorp/null" + version = ">= 2.1, < 4" + } + random = { + source = "hashicorp/random" + version = ">= 2.2, < 4" + } + } } diff --git a/infra/terraform/test-org/tf-validator/.terraform.lock.hcl b/infra/terraform/test-org/tf-validator/.terraform.lock.hcl new file mode 
100644 index 00000000000..939bcd9bb13 --- /dev/null +++ b/infra/terraform/test-org/tf-validator/.terraform.lock.hcl @@ -0,0 +1,102 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/google" { + version = "6.2.0" + constraints = ">= 3.43.0, >= 4.28.0, >= 5.41.0, < 7.0.0" + hashes = [ + "h1:7JIgzQKRW0AT6UliiYSjYUKxDr03baZpQmt5XVkrujs=", + "zh:08a7dc0b53d2b63baab928e66086bf3e09107516af078ce011d2667456e64834", + "zh:1cf9a1373e516844b43fdcea36e73f5a68f19ad07afcf6093788eb235c710163", + "zh:2d4a7cb26c3f0d036d51db219a09013d3d779e44d584e0fc631df0f2cd5e5550", + "zh:47e1fc68e455f99f1875deaed9aa5434a852e2a70a3cb5a5e9b5a2d8c25d7b74", + "zh:78531a8624ddcd45277e1b465e773ac92001ea0e200e9dc1147ebeb24d56359e", + "zh:a76751723c034d44764df22925178f78d8b4852e3e6ac6c5d86f51666c9e666c", + "zh:a83a59a7e667cfffb0d501a501e9b3d2d4fcc83deb07a318c9690d537cbdc4b6", + "zh:b16473b7e59e01690d8234a0044c304505688f5518b205e9ed06fc63ddc82977", + "zh:b957648ad0383e17149bf3a02def81ebc6bd55ca0cffb6ec1c368a1b4f33c4fd", + "zh:e2f3f4a27b41a20bdbb7a80fbcde1a4c36bbd1c83edb9256bc1724754f8d370f", + "zh:ecfce738f85a81603aa51162d5237d6faaa2ffc0f0e52694f8b420ad761a8957", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/google-beta" { + version = "6.2.0" + constraints = ">= 3.43.0, >= 4.11.0, >= 5.41.0, < 7.0.0" + hashes = [ + "h1:rDpLrqSZEnuUP697LITaMqmIF6k2j9wGLG9mGNl79Zs=", + "zh:01d227ce9fd7538b331efa669fadb33cc9cf6fd3c76351a70b6f71fc513494a2", + "zh:03113bccea6da3ac7c9187728182e940869f17285854709bb9f2d4aed34e44f2", + "zh:33b93d97b757b3d92a566a553a79b1ebd1c6bbdfa3c5e3244b8fe6f73789dff8", + "zh:697a6cb1b86ca5bc2e03845a6a4cbfe9ce3c287d4df3cfdc51673a7a47c71a8e", + "zh:7232e153f55f6b92377e482a93c4c790b9b7ed87ac79b84fac046bf04aa0b22b", + "zh:a9ca71fe4f5ab3800bde2bff312d1d3ed21864692a7948577f4d3d9b4753c503", + 
"zh:bdbb86941ec6d9be00d7ed37917a90ffe10314a2c604f1517c4ecf5d7d2d6aac", + "zh:caeaa6f1dca10564c73b86692da63976f971af9a36d93129ae571300c0002ff5", + "zh:cd6e45a56a707e84a84dd17b1357ad7615eaaf1335bcb2ee8dcb9ad239e015f9", + "zh:ce85d507416433bca5ea31537c93dee6d3900f01b2f1d3b3d7caca16294383a6", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:fa95af70f352338e73615500ccee22c79ce7f2ec25444b52cde9f798e75eafeb", + ] +} + +provider "registry.terraform.io/hashicorp/null" { + version = "3.2.3" + constraints = ">= 2.1.0" + hashes = [ + "h1:+AnORRgFbRO6qqcfaQyeX80W0eX3VmjadjnUFUJTiXo=", + "zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2", + "zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d", + "zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3", + "zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f", + "zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301", + "zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670", + "zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed", + "zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65", + "zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd", + "zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.6.3" + constraints = ">= 2.2.0" + hashes = [ + "h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=", + "zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451", + "zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8", + "zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe", + "zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1", + 
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36", + "zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30", + "zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615", + "zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad", + "zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556", + "zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0", + ] +} + +provider "registry.terraform.io/hashicorp/time" { + version = "0.12.1" + constraints = ">= 0.5.0" + hashes = [ + "h1:6BhxSYBJdBBKyuqatOGkuPKVenfx6UmLdiI13Pb3his=", + "zh:090023137df8effe8804e81c65f636dadf8f9d35b79c3afff282d39367ba44b2", + "zh:26f1e458358ba55f6558613f1427dcfa6ae2be5119b722d0b3adb27cd001efea", + "zh:272ccc73a03384b72b964918c7afeb22c2e6be22460d92b150aaf28f29a7d511", + "zh:438b8c74f5ed62fe921bd1078abe628a6675e44912933100ea4fa26863e340e9", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:85c8bd8eefc4afc33445de2ee7fbf33a7807bc34eb3734b8eefa4e98e4cddf38", + "zh:98bbe309c9ff5b2352de6a047e0ec6c7e3764b4ed3dfd370839c4be2fbfff869", + "zh:9c7bf8c56da1b124e0e2f3210a1915e778bab2be924481af684695b52672891e", + "zh:d2200f7f6ab8ecb8373cda796b864ad4867f5c255cff9d3b032f666e4c78f625", + "zh:d8c7926feaddfdc08d5ebb41b03445166df8c125417b28d64712dccd9feef136", + "zh:e2412a192fc340c61b373d6c20c9d805d7d3dee6c720c34db23c2a8ff0abd71b", + "zh:e6ac6bba391afe728a099df344dbd6481425b06d61697522017b8f7a59957d44", + ] +} diff --git a/infra/terraform/test-org/tf-validator/iam.tf b/infra/terraform/test-org/tf-validator/iam.tf index add323a17ba..e847df619b3 100644 --- a/infra/terraform/test-org/tf-validator/iam.tf +++ b/infra/terraform/test-org/tf-validator/iam.tf @@ -35,8 +35,14 @@ resource "google_project_iam_member" "kokoro_test_1" { member = 
"serviceAccount:kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com" } -resource "google_project_iam_member" "magic_modules_cloudbuild_sa" { - project = module.terraform_validator_test_project.project_id - role = "roles/editor" - member = "serviceAccount:673497134629@cloudbuild.gserviceaccount.com" +resource "google_folder_iam_member" "magic_modules_cloudbuild_sa_folder_viewer" { + folder = local.projects_folder_id + role = "roles/resourcemanager.folderViewer" + member = local.magic_modules_cloudbuild_sa +} + +resource "google_folder_iam_member" "magic_modules_cloudbuild_sa_security_reviewer" { + folder = local.projects_folder_id + role = "roles/iam.securityReviewer" + member = local.magic_modules_cloudbuild_sa } diff --git a/infra/terraform/test-org/tf-validator/locals.tf b/infra/terraform/test-org/tf-validator/locals.tf index 6b7fcd55a3d..327eabed150 100644 --- a/infra/terraform/test-org/tf-validator/locals.tf +++ b/infra/terraform/test-org/tf-validator/locals.tf @@ -20,8 +20,11 @@ locals { "roles/editor" ] - folder_id = data.terraform_remote_state.org.outputs.folders["ci-terraform-validator"] - org_id = data.terraform_remote_state.org.outputs.org_id - billing_account = data.terraform_remote_state.org.outputs.billing_account - cft_ci_group = data.terraform_remote_state.org.outputs.cft_ci_group + magic_modules_cloudbuild_sa = "serviceAccount:673497134629@cloudbuild.gserviceaccount.com" + + projects_folder_id = data.terraform_remote_state.org.outputs.folders["ci-projects"] + folder_id = data.terraform_remote_state.org.outputs.folders["ci-terraform-validator"] + org_id = data.terraform_remote_state.org.outputs.org_id + billing_account = data.terraform_remote_state.org.outputs.billing_account + cft_ci_group = data.terraform_remote_state.org.outputs.cft_ci_group } diff --git a/infra/terraform/test-org/tf-validator/project.tf b/infra/terraform/test-org/tf-validator/project.tf index 775c274f108..a455cad5e12 100644 --- 
a/infra/terraform/test-org/tf-validator/project.tf +++ b/infra/terraform/test-org/tf-validator/project.tf @@ -19,7 +19,7 @@ // there are no reason to create an ephemeral test project + service account each build. module "terraform_validator_test_project" { source = "terraform-google-modules/project-factory/google" - version = "~> 11.0" + version = "~> 17.0" name = local.terraform_validator_project_name random_project_id = true diff --git a/infra/terraform/test-org/tf-validator/versions.tf b/infra/terraform/test-org/tf-validator/versions.tf index 42dbd4f2a25..715c70afdf6 100644 --- a/infra/terraform/test-org/tf-validator/versions.tf +++ b/infra/terraform/test-org/tf-validator/versions.tf @@ -1,5 +1,5 @@ /** - * Copyright 2019 Google LLC + * Copyright 2019-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,10 +15,16 @@ */ terraform { - required_version = ">= 0.13" + required_version = ">= 1.4.4" required_providers { google = { - source = "hashicorp/google" + source = "hashicorp/google" + version = "< 7" } + google-beta = { + source = "hashicorp/google-beta" + version = "< 7" + } + } } diff --git a/infra/utils/delete-projects.py b/infra/utils/delete-projects.py deleted file mode 100755 index 76cfced96a2..00000000000 --- a/infra/utils/delete-projects.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import google.api_core -from google.cloud import resource_manager -from googleapiclient.discovery import build -import sys -from pprint import pprint -import argparse -from googleapiclient import discovery -from oauth2client.client import GoogleCredentials - -client = resource_manager.Client() -credentials = GoogleCredentials.get_application_default() -client2 = build('cloudresourcemanager', 'v2', credentials=credentials) - -def delete_liens(project_id): - service = discovery.build('cloudresourcemanager', 'v1', credentials=credentials) - - parent = 'projects/{}'.format(project_id) - request = service.liens().list(parent=parent) - response = request.execute() - - liens_deleted = 0 - - for lien in response.get('liens', []): - print("Deleting lien:", lien) - d_request = service.liens().delete(name=lien.get('name')) - d_request.execute() - liens_deleted += 1 - - return liens_deleted - -def delete_project(project): - try: - project.delete() - except google.api_core.exceptions.BadRequest as e: - liens_deleted = delete_liens(project.project_id) - if liens_deleted >= 1: - delete_project(project) - else: - print("Bad request and no liens found.") - print(e) - except (google.api_core.exceptions.Forbidden) as e: - print("Failed to delete {}".format(project.project_id)) - print(e) - -def delete_children(parent_type, parent_id, delete_root=True): - print("Deleting children of {} {}".format(parent_type, parent_id)) - - project_filter = { - 'parent.type': parent_type, - 'parent.id': parent_id - } - for project in client.list_projects(project_filter): - if (project.status != "ACTIVE"): - print(" Skipping deletion of inactive project {}...".format(project.project_id)) - continue - print(" Deleting project {} (status={})...".format(project.project_id, project.status)) - delete_project(project) - - name = "{}s/{}".format(parent_type, parent_id) - res = 
client2.folders().list(parent=name).execute() - for folder in res.get('folders', []): - delete_children("folder", folder.get('name').split('/')[-1]) - - if delete_root: - deletion = client2.folders().delete(name=name).execute() - if deletion.get('lifecycleState') == 'DELETE_REQUESTED': - print("Deleted {}".format(name)) - else: - print(deletion) - -def main(argv): - parser = argparser() - args = parser.parse_args(argv[1:]) - - (parent_type, parent_id) = args.parent_id.split('/') - - delete_children(parent_type.strip('s'), parent_id, delete_root=False) - -def argparser(): - parser = argparse.ArgumentParser(description='Delete projects within a folder') - parser.add_argument('parent_id') - return parser - - -if __name__ == "__main__": - main(sys.argv) diff --git a/infra/utils/fbf/README.md b/infra/utils/fbf/README.md new file mode 100644 index 00000000000..bdfc5b46b99 --- /dev/null +++ b/infra/utils/fbf/README.md @@ -0,0 +1,19 @@ +# Flaky Build Finder + +## Usage + +``` +go install github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/utils/fbf +``` + +``` +Usage of fbf: + -end-time string + Time to stop computing flakes in form MM-DD-YYYY + -project-id string + Project ID + -start-time string + Time to start computing flakes in form MM-DD-YYYY + -verbose + Display detailed table with flaky build IDs +``` diff --git a/infra/utils/fbf/cmd/cb.go b/infra/utils/fbf/cmd/cb.go new file mode 100644 index 00000000000..2a48b28af1f --- /dev/null +++ b/infra/utils/fbf/cmd/cb.go @@ -0,0 +1,63 @@ +package cmd + +import ( + "context" + "fmt" + "time" + + cloudbuild "google.golang.org/api/cloudbuild/v1" +) + +const ( + successStatus = "SUCCESS" + failedStatus = "FAILURE" +) + +// getCBBuildsWithFilter returns a list of cloudbuild builds in projectID within start and end time +func getCBBuildsWithFilter(start, end time.Time, projectID string) ([]*build, error) { + ctx := context.Background() + cloudbuildService, err := cloudbuild.NewService(ctx) + if err != nil { + return
nil, fmt.Errorf("error creating cloudbuild service: %v", err) + } + + filter := fmt.Sprintf("create_time>=\"%s\" AND create_time<\"%s\"", formatTimeCB(start), formatTimeCB(end)) + c, err := cloudbuildService.Projects.Builds.List(projectID).Filter(filter).Do() + if err != nil { + return nil, fmt.Errorf("error listing builds with filter %s in project %s: %v", filter, projectID, err) + } + cbBuilds := c.Builds + if len(cbBuilds) < 1 { + return nil, fmt.Errorf("no builds found with filter %s in project %s", filter, projectID) + } + + for c.NextPageToken != "" { + c, err = cloudbuildService.Projects.Builds.List(projectID).Filter(filter).PageToken(c.NextPageToken).Do() + if err != nil { + return nil, fmt.Errorf("error retrieving next page with token %s: %v", c.NextPageToken, err) + } + cbBuilds = append(cbBuilds, c.Builds...) + if c.NextPageToken == "" { + break + } + } + + builds := []*build{} + for _, b := range cbBuilds { + // filter out builds not triggered from source repos + commit, commitExists := b.Substitutions["COMMIT_SHA"] + if !commitExists { + continue + } + repoName, repoNameExists := b.Substitutions["REPO_NAME"] + if !repoNameExists { + continue + } + triggerName, triggerNameExists := b.Substitutions["TRIGGER_NAME"] + if !triggerNameExists { + continue + } + builds = append(builds, &build{commitSHA: commit, repoName: repoName, jobName: triggerName, id: b.Id, status: b.Status}) + } + return builds, nil +} diff --git a/infra/utils/fbf/cmd/flaky.go b/infra/utils/fbf/cmd/flaky.go new file mode 100644 index 00000000000..7cb2cb8a886 --- /dev/null +++ b/infra/utils/fbf/cmd/flaky.go @@ -0,0 +1,160 @@ +package cmd + +import ( + "fmt" + "os" + "time" + + "github.com/briandowns/spinner" + "github.com/jedib0t/go-pretty/v6/table" +) + +// FlakyFinder finds flakes between start and end times +type FlakyFinder struct { + startTime time.Time + endTime time.Time + projectID string + verbose bool + flakes map[string]flake +} + +// flake represents a collection of flaky builds for a given commit
+type flake struct { + repo string // repo name + commitSHA string // commit SHA + passes map[string]*build // builds passed for commit + fails map[string]*build // builds failed for commit +} + +// build represents a single instance of a job invoked at commitSHA on a source repo +type build struct { + repoName string + jobName string + commitSHA string + id string + status string +} + +// todo(bharathkkb): use config +func NewFlakyFinder(start, end, projectID string, verbose bool) (*FlakyFinder, error) { + startTime, err := getTimeFromStr(start) + if err != nil { + return nil, fmt.Errorf("error parsing startTime: %v", err) + } + endTime, err := getTimeFromStr(end) + if err != nil { + return nil, fmt.Errorf("error parsing endTime: %v", err) + } + if projectID == "" { + return nil, fmt.Errorf("error got empty project ID") + } + return &FlakyFinder{ + startTime: startTime, + endTime: endTime, + projectID: projectID, + verbose: verbose, + }, nil +} + +func (f *FlakyFinder) ComputeFlakes() error { + // get builds + s := spinner.New(spinner.CharSets[35], 500*time.Millisecond) + s.Start() + // todo(bharathkkb): support other build systems + builds, err := getCBBuildsWithFilter(f.startTime, f.endTime, f.projectID) + if err != nil { + return fmt.Errorf("error getting builds: %v", err) + } + s.Stop() + // compute flakes + f.flakes = computeFlakesFromBuilds(builds) + return nil +} + +// computeFlakesFromBuilds computes flakes from a slice of builds +// a collection of builds are considered flaky iff at least two builds +// have passed and failed at the same commit in a repo when triggered by the same job +func computeFlakesFromBuilds(builds []*build) map[string]flake { + flakes := make(map[string]flake) + for _, b1 := range builds { + // commit may have multiple builds so the key for flake lookup is + // computed from commitSHA and job name + flakeKey := fmt.Sprintf("%s-%s", b1.commitSHA, b1.jobName) + // skip if flakes with same flakeKey were previously computed +
//todo(bharathkkb): optimize, we can probably remove elems in a flake from build slice + _, exists := flakes[flakeKey] + if exists { + continue + } + // store individual build info + passedBuildsWithCommit := make(map[string]*build) + failedBuildsWithCommit := make(map[string]*build) + storeBuildInfo := func(b *build) { + switch b.status { + case successStatus: + passedBuildsWithCommit[b.id] = b + case failedStatus: + failedBuildsWithCommit[b.id] = b + } + } + storeBuildInfo(b1) + + for _, b2 := range builds { + // match other builds with same commit,repo and job + if b1.commitSHA == b2.commitSHA && + b1.repoName == b2.repoName && + b1.jobName == b2.jobName { + storeBuildInfo(b2) + } + } + + // At least one pass and one fail for a given commit is necessary to become a flake + if len(passedBuildsWithCommit) > 0 && len(failedBuildsWithCommit) > 0 { + flakes[flakeKey] = flake{repo: b1.repoName, commitSHA: b1.commitSHA, passes: passedBuildsWithCommit, fails: failedBuildsWithCommit} + } + } + return flakes +} + +// render displays results in a tabular format +func (f *FlakyFinder) Render() { + // verbose table with build ids + tableVerbose := table.NewWriter() + tableVerbose.SetOutputMirror(os.Stdout) + tableVerbose.AppendHeader(table.Row{"Repo", "Commit", "Pass Build IDs", "Fail Build IDs"}) + // flakes per repo + repoFlakeCount := make(map[string]int) + // flake failures per repo + repoFlakeFailCount := make(map[string]int) + for _, f := range f.flakes { + repoFlakeCount[f.repo]++ + repoFlakeFailCount[f.repo] += len(f.fails) + pass := "" + for id := range f.passes { + pass += id + "\n" + } + fail := "" + for id := range f.fails { + fail += id + "\n" + } + tableVerbose.AppendRow(table.Row{f.repo, f.commitSHA, pass, fail}) + tableVerbose.AppendSeparator() + } + if f.verbose { + tableVerbose.Render() + } + + // overview table with total number of flakes per repo + tableOverview := table.NewWriter() + tableOverview.SetOutputMirror(os.Stdout) + 
tableOverview.AppendHeader(table.Row{"Repo", "Flakes", "Flake Failures"}) + totalFlakeCount := 0 + totalFlakeFailCount := 0 + for repo, flakeCount := range repoFlakeCount { + tableOverview.AppendRow(table.Row{repo, flakeCount, repoFlakeFailCount[repo]}) + totalFlakeCount += flakeCount + totalFlakeFailCount += repoFlakeFailCount[repo] + } + tableOverview.AppendFooter(table.Row{"Total", totalFlakeCount, totalFlakeFailCount}) + tableOverview.Render() +} diff --git a/infra/utils/fbf/cmd/flaky_test.go b/infra/utils/fbf/cmd/flaky_test.go new file mode 100644 index 00000000000..c2c4b5a7f92 --- /dev/null +++ b/infra/utils/fbf/cmd/flaky_test.go @@ -0,0 +1,81 @@ +package cmd + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_computeFlakesFromBuilds(t *testing.T) { + tests := []struct { + name string + builds []*build + want map[string]flake + }{ + { + name: "single", + builds: []*build{ + getBuild("foo", "j1", "1", "id1", true), + getBuild("foo", "j1", "1", "id2", false), + }, + want: map[string]flake{"1-j1": { + repo: "foo", + commitSHA: "1", + passes: map[string]*build{"id1": getBuild("foo", "j1", "1", "id1", true)}, + fails: map[string]*build{"id2": getBuild("foo", "j1", "1", "id2", false)}, + }, + }, + }, + { + name: "multiple with no flakes", + builds: []*build{ + getBuild("foo", "j1", "1", "id1", true), + getBuild("foo", "j1", "2", "id2", false), + }, + want: map[string]flake{}, + }, + { + name: "multiple flakes", + builds: []*build{ + getBuild("foo", "j1", "1", "id1", true), + getBuild("foo", "j1", "1", "id2", false), + getBuild("foo", "j1", "1", "id3", false), + getBuild("foo", "j1", "2", "id4", true), + getBuild("foo", "j1", "2", "id5", false), + getBuild("bar", "j1", "2", "id6", false), + getBuild("bar", "j2", "2", "id7", false), + }, + want: map[string]flake{ + "1-j1": { + repo: "foo", + commitSHA: "1", + passes: map[string]*build{"id1": getBuild("foo", "j1", "1", "id1", true)}, + fails: map[string]*build{ + "id2": getBuild("foo", 
"j1", "1", "id2", false), + "id3": getBuild("foo", "j1", "1", "id3", false), + }, + }, + "2-j1": { + repo: "foo", + commitSHA: "2", + passes: map[string]*build{"id4": getBuild("foo", "j1", "2", "id4", true)}, + fails: map[string]*build{"id5": getBuild("foo", "j1", "2", "id5", false)}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := computeFlakesFromBuilds(tt.builds) + require.EqualValues(t, tt.want, got) + }) + } +} + +func getBuild(repoName, jobName, commitSHA string, id string, pass bool) *build { + status := failedStatus + if pass { + status = successStatus + } + return &build{repoName: repoName, jobName: jobName, commitSHA: commitSHA, id: id, status: status} +} diff --git a/infra/utils/fbf/cmd/time.go b/infra/utils/fbf/cmd/time.go new file mode 100644 index 00000000000..faeb051b0c2 --- /dev/null +++ b/infra/utils/fbf/cmd/time.go @@ -0,0 +1,13 @@ +package cmd + +import ( + "time" +) + +func getTimeFromStr(t string) (time.Time, error) { + return time.Parse("01-02-2006", t) +} + +func formatTimeCB(t time.Time) string { + return t.Format(time.RFC3339) +} diff --git a/infra/utils/fbf/go.mod b/infra/utils/fbf/go.mod new file mode 100644 index 00000000000..82316318f15 --- /dev/null +++ b/infra/utils/fbf/go.mod @@ -0,0 +1,45 @@ +module github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/utils/fbf + +go 1.22 + +require ( + github.com/briandowns/spinner v1.23.1 + github.com/jedib0t/go-pretty/v6 v6.6.5 + github.com/stretchr/testify v1.10.0 + google.golang.org/api v0.214.0 +) + +require ( + cloud.google.com/go/auth v0.13.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.15.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/s2a-go v0.1.8 // 
indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.14.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241206012308-a4fef0638583 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.2 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/infra/utils/fbf/go.sum b/infra/utils/fbf/go.sum new file mode 100644 index 00000000000..d63b89a1fea --- /dev/null +++ b/infra/utils/fbf/go.sum @@ -0,0 +1,83 @@ +cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +github.com/briandowns/spinner v1.23.1 
h1:t5fDPmScwUjozhDj4FA46p5acZWIPXYE30qW2Ptu650= +github.com/briandowns/spinner v1.23.1/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= +github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= 
+github.com/jedib0t/go-pretty/v6 v6.6.5 h1:9PgMJOVBedpgYLI56jQRJYqngxYAAzfEUua+3NgSqAo= +github.com/jedib0t/go-pretty/v6 v6.6.5/go.mod h1:Uq/HrbhuFty5WSVNfjpQQe47x16RwVGXIveNGEyGtHs= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod 
h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +google.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA= +google.golang.org/api v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE= +google.golang.org/genproto/googleapis/api v0.0.0-20241206012308-a4fef0638583 h1:v+j+5gpj0FopU0KKLDGfDo9ZRRpKdi5UBrCP0f76kuY= +google.golang.org/genproto/googleapis/api v0.0.0-20241206012308-a4fef0638583/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/infra/utils/fbf/main.go b/infra/utils/fbf/main.go new file mode 100644 index 00000000000..cf277ae0083 --- /dev/null +++ b/infra/utils/fbf/main.go @@ -0,0 +1,26 @@ +package main + +import ( + "flag" + "log" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/utils/fbf/cmd" +) + +func main() { + startTime := flag.String("start-time", "", "Time to start computing flakes in form MM-DD-YYYY") + endTime := flag.String("end-time", "", "Time to stop computing flakes in form MM-DD-YYYY") + projectID := flag.String("project-id", "", "Project ID") + verbose := flag.Bool("verbose", false, "Display detailed table with flaky build IDs") + flag.Parse() + + ftf, err := cmd.NewFlakyFinder(*startTime, *endTime, *projectID, *verbose) + if err != nil { + log.Fatalf("error initializing flaky finder: %v", err) + } + err = ftf.ComputeFlakes() + if err != nil { + log.Fatalf("error computing flakes: %v", err) + } + ftf.Render() +} diff --git a/infra/utils/requirements.txt b/infra/utils/requirements.txt deleted file mode 100644 index e4aa8717e5d..00000000000 
--- a/infra/utils/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -google-cloud==0.34.0 -google-cloud-resource-manager==0.30.2 -oauth2client==4.1.3 -google-api-python-client==1.12.8 diff --git a/release-please-config.json b/release-please-config.json new file mode 100644 index 00000000000..3d57e6ea89e --- /dev/null +++ b/release-please-config.json @@ -0,0 +1,28 @@ +{ + "separate-pull-requests": true, + "tag-separator": "/", + "include-component-in-tag": true, + "packages": { + "infra/blueprint-test": { + "release-type": "go", + "package-name": "blueprint-test", + "component": "infra/blueprint-test", + "pull-request-title-pattern": "chore(release-please): release blueprint-test ${version}", + "bump-minor-pre-major": true + }, + "infra/module-swapper": { + "release-type": "go", + "package-name": "module-swapper", + "component": "infra/module-swapper", + "pull-request-title-pattern": "chore(release-please): release module-swapper ${version}", + "bump-minor-pre-major": true + }, + "tflint-ruleset-blueprint": { + "release-type": "go", + "package-name": "tflint-ruleset-blueprint", + "component": "tflint-ruleset-blueprint", + "pull-request-title-pattern": "chore(release-please): release tflint-ruleset-blueprint ${version}", + "bump-minor-pre-major": true + } + } +} diff --git a/tflint-ruleset-blueprint/.gitignore b/tflint-ruleset-blueprint/.gitignore new file mode 100644 index 00000000000..60a78ebf955 --- /dev/null +++ b/tflint-ruleset-blueprint/.gitignore @@ -0,0 +1,5 @@ +tflint-ruleset-blueprint +.terraform +.terraform.lock.hcl +terraform.tfstate +terraform.tfstate.backup diff --git a/tflint-ruleset-blueprint/.goreleaser.yml b/tflint-ruleset-blueprint/.goreleaser.yml new file mode 100644 index 00000000000..7f3d4be6362 --- /dev/null +++ b/tflint-ruleset-blueprint/.goreleaser.yml @@ -0,0 +1,32 @@ +project_name: tflint-ruleset-blueprint +env: + - CGO_ENABLED=0 +builds: + - targets: + - darwin_amd64 + - darwin_arm64 + - linux_386 + - linux_amd64 + - linux_arm + - linux_arm64 
+ - windows_386 + - windows_amd64 + hooks: + post: + - mkdir -p ./dist/raw + - cp "{{ .Path }}" "./dist/raw/{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}" +archives: + - id: zip + name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}" + format: zip + files: + - none* +checksum: + name_template: 'checksums.txt' + extra_files: + - glob: ./dist/raw/* +changelog: + disable: true +release: + mode: 'keep-existing' + name_template: "tflint-ruleset-blueprint/v{{.Version}}" diff --git a/tflint-ruleset-blueprint/CHANGELOG.md b/tflint-ruleset-blueprint/CHANGELOG.md new file mode 100644 index 00000000000..061eb3ef860 --- /dev/null +++ b/tflint-ruleset-blueprint/CHANGELOG.md @@ -0,0 +1,72 @@ +# Changelog + +## [0.2.7](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/tflint-ruleset-blueprint/v0.2.6...tflint-ruleset-blueprint/v0.2.7) (2025-01-03) + + +### Bug Fixes + +* **deps:** Update module golang.org/x/net to v0.33.0 ([#2784](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2784)) ([b2655aa](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/b2655aa9f00800808cf0b9612ccf7f35fbec49c8)) + +## [0.2.6](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/tflint-ruleset-blueprint/v0.2.5...tflint-ruleset-blueprint/v0.2.6) (2024-12-05) + + +### Bug Fixes + +* **deps:** update module github.com/hashicorp/hcl/v2 to v2.23.0 ([#2709](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2709)) ([978454e](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/978454ea6e3ff9dbc2f052ddc174f5d6bc35a26e)) + +## [0.2.5](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/tflint-ruleset-blueprint/v0.2.4...tflint-ruleset-blueprint/v0.2.5) (2024-09-13) + + +### Bug Fixes + +* **deps:** update dependency go to v1.22.7 ([#2593](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2593)) 
([fd980e0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/fd980e05c527a1eb97b29ee1715e75b015ca7700)) +* **deps:** update module github.com/hashicorp/hcl/v2 to v2.22.0 ([#2545](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2545)) ([12f8179](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/12f8179dab8bc869110144ab7fc759e987aee15c)) + +## [0.2.4](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/tflint-ruleset-blueprint/v0.2.3...tflint-ruleset-blueprint/v0.2.4) (2024-08-15) + + +### Bug Fixes + +* **terraform_required_version_range:** terraform block ([#2529](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2529)) ([f5dce52](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/f5dce52092c3c41662c7722db99d80c0e4a9d74d)) + +## [0.2.3](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/tflint-ruleset-blueprint/v0.2.2...tflint-ruleset-blueprint/v0.2.3) (2024-08-14) + + +### Bug Fixes + +* **terraform_required_version_range:** skip missing ([#2522](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2522)) ([23fc89f](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/23fc89f97b968b97c2a9da8235ed62b45e181d6c)) + +## [0.2.2](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/tflint-ruleset-blueprint/v0.2.1...tflint-ruleset-blueprint/v0.2.2) (2024-08-14) + + +### Bug Fixes + +* **tflint-ruleset-blueprint:** increment internal version ([#2518](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2518)) ([1f450a3](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/1f450a3ee0bb9c660f04b8ff491a680d9f667ab1)) + +## [0.2.1](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/tflint-ruleset-blueprint/v0.2.0...tflint-ruleset-blueprint/v0.2.1) (2024-08-13) + + +### Bug Fixes + +* **tflint:** rename plugin 
([#2516](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2516)) ([0cc19e2](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/0cc19e2068b9b41e594ed0659319ed03a0f7b5b7)) + +## [0.2.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/tflint-ruleset-blueprint/v0.1.0...tflint-ruleset-blueprint/v0.2.0) (2024-08-09) + + +### Features + +* **tflint_bp_plugin:** add min & max parameters ([#2502](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2502)) ([6b5c501](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/6b5c501bce5558aa5d2aef315c2a4d273c664d81)) +* **tflint-ruleset:** add terraform_required_version ([#2485](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2485)) ([091a4ff](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/091a4ff2c68dfccb8a5011b039a22cc34074ccef)) + + +### Bug Fixes + +* **deps:** update module github.com/terraform-linters/tflint-plugin-sdk to v0.20.0 ([#2380](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2380)) ([6bc4f2d](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/6bc4f2d709ec3878467ca34db8290b95238fa200)) +* **deps:** update module github.com/terraform-linters/tflint-plugin-sdk to v0.21.0 ([#2479](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2479)) ([78de0b9](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/78de0b9369c4d92fefc3c6299ade0aa3554e79b5)) + +## [0.1.0](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/compare/tflint-ruleset-blueprint-v0.0.1...tflint-ruleset-blueprint/v0.1.0) (2024-05-02) + + +### Features + +* blueprints tflint plugin ([#2265](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/issues/2265)) ([461302e](https://github.com/GoogleCloudPlatform/cloud-foundation-toolkit/commit/461302e839616b95eef08523bbcb5d598e834d70)) diff --git 
a/tflint-ruleset-blueprint/Makefile b/tflint-ruleset-blueprint/Makefile new file mode 100644 index 00000000000..ae016fe17ca --- /dev/null +++ b/tflint-ruleset-blueprint/Makefile @@ -0,0 +1,11 @@ +default: build + +test: + go test ./... -v + +build: + go build + +install: build + mkdir -p ~/.tflint.d/plugins + mv ./tflint-ruleset-blueprint ~/.tflint.d/plugins diff --git a/tflint-ruleset-blueprint/README.md b/tflint-ruleset-blueprint/README.md new file mode 100644 index 00000000000..b8dcd12c1c3 --- /dev/null +++ b/tflint-ruleset-blueprint/README.md @@ -0,0 +1,3 @@ +# Blueprints ruleset plugin for TFLint + +TFlint ruleset plugin enforcing best practices for TF blueprints and samples. diff --git a/tflint-ruleset-blueprint/go.mod b/tflint-ruleset-blueprint/go.mod new file mode 100644 index 00000000000..1b99177ba6a --- /dev/null +++ b/tflint-ruleset-blueprint/go.mod @@ -0,0 +1,39 @@ +module github.com/cloud-foundation-toolkit/tflint-ruleset-blueprint + +go 1.22.2 + +toolchain go1.23.4 + +require ( + github.com/hashicorp/go-version v1.7.0 + github.com/hashicorp/hcl/v2 v2.23.0 + github.com/terraform-linters/tflint-plugin-sdk v0.21.0 +) + +require ( + github.com/agext/levenshtein v1.2.1 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect + github.com/hashicorp/go-plugin v1.6.1 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 // indirect + github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + 
github.com/zclconf/go-cty v1.15.0 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools v0.23.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect +) diff --git a/tflint-ruleset-blueprint/go.sum b/tflint-ruleset-blueprint/go.sum new file mode 100644 index 00000000000..3f4a572476a --- /dev/null +++ b/tflint-ruleset-blueprint/go.sum @@ -0,0 +1,84 @@ +github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI= +github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= +github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= +github.com/mitchellh/go-testing-interface 
v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/terraform-linters/tflint-plugin-sdk v0.21.0 h1:RoorxuuWh1RuL09PWAmaCKw/hmb9QP5dukGXZiB0fs8= +github.com/terraform-linters/tflint-plugin-sdk v0.21.0/go.mod h1:f7ruoYh44RQvnZRxpWhn8JFkpEVlQFT8wC9MhIF0Rp4= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= +github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod 
h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 
h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tflint-ruleset-blueprint/main.go b/tflint-ruleset-blueprint/main.go new file mode 100644 index 00000000000..79c0201ced4 --- /dev/null +++ b/tflint-ruleset-blueprint/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "github.com/cloud-foundation-toolkit/tflint-ruleset-blueprint/rules" + "github.com/terraform-linters/tflint-plugin-sdk/plugin" + "github.com/terraform-linters/tflint-plugin-sdk/tflint" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + RuleSet: &tflint.BuiltinRuleSet{ + Name: "blueprint", + Version: "0.2.4", + Rules: []tflint.Rule{ + rules.NewTerraformDocSamplesRestrictedBlocks(), + rules.NewTerraformDocSamplesRestrictedResources(), + rules.NewTerraformRequiredVersionRange(), + }, + }, + }) +} diff --git a/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_blocks.go b/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_blocks.go new file mode 100644 index 00000000000..249591b38b3 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_blocks.go @@ -0,0 +1,93 @@ +package rules + +import ( + "fmt" + + "github.com/terraform-linters/tflint-plugin-sdk/hclext" + "github.com/terraform-linters/tflint-plugin-sdk/tflint" +) + +// TerraformDocSamplesRestrictedBlocks checks whether restricted blocks are used. +type TerraformDocSamplesRestrictedBlocks struct { + tflint.DefaultRule +} + +// NewTerraformDocSamplesRestrictedBlocks returns a new rule. 
+func NewTerraformDocSamplesRestrictedBlocks() *TerraformDocSamplesRestrictedBlocks { + return &TerraformDocSamplesRestrictedBlocks{} +} + +// Name returns the rule name. +func (r *TerraformDocSamplesRestrictedBlocks) Name() string { + return "terraform_doc_sample_restricted_blocks" +} + +// Enabled returns whether the rule is enabled by default. +func (r *TerraformDocSamplesRestrictedBlocks) Enabled() bool { + return false +} + +// Severity returns the rule severity. +func (r *TerraformDocSamplesRestrictedBlocks) Severity() tflint.Severity { + return tflint.ERROR +} + +// Link returns the rule reference link +func (r *TerraformDocSamplesRestrictedBlocks) Link() string { + return "https://googlecloudplatform.github.io/samples-style-guide/#language-specific" +} + +const ( + moduleBlockType = "module" + variableBlockType = "variable" +) + +var restrictedBlocks = []string{moduleBlockType, variableBlockType} + +// Check checks whether config contains restricted block types. +func (r *TerraformDocSamplesRestrictedBlocks) Check(runner tflint.Runner) error { + path, err := runner.GetModulePath() + if err != nil { + return err + } + if !path.IsRoot() { + // Each sample must be a root module. + return nil + } + + // Extract restricted blocks if any from config. + restrictedBlocksSchema := make([]hclext.BlockSchema, 0, len(restrictedBlocks)) + for _, rb := range restrictedBlocks { + rs := hclext.BlockSchema{ + Type: rb, + LabelNames: []string{"name"}, + Body: &hclext.BodySchema{}, + } + restrictedBlocksSchema = append(restrictedBlocksSchema, rs) + } + body, err := runner.GetModuleContent(&hclext.BodySchema{ + Blocks: restrictedBlocksSchema, + }, &tflint.GetModuleContentOption{ExpandMode: tflint.ExpandModeNone}) + if err != nil { + return err + } + + // Emit issues if extracted blocks are found. 
+ blocks := body.Blocks.ByType() + for _, rBlockType := range restrictedBlocks { + rBlocks, ok := blocks[rBlockType] + if ok { + for _, rBlock := range rBlocks { + err := runner.EmitIssue( + r, + fmt.Sprintf("doc sample restricted block type %s", rBlockType), + rBlock.DefRange, + ) + if err != nil { + return err + } + } + } + } + return nil +} diff --git a/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_blocks_test.go b/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_blocks_test.go new file mode 100644 index 00000000000..5345afabfc6 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_blocks_test.go @@ -0,0 +1,32 @@ +package rules + +import ( + "path" + "testing" +) + +const ( + restrictedBlocksTestDir = "doc-sample-restricted-blocks" +) + +func TestRestrictedBlocks(t *testing.T) { + tests := []ruleTC{ + { + dir: path.Join(restrictedBlocksTestDir, "valid"), + }, + { + dir: path.Join(restrictedBlocksTestDir, "single-restricted"), + }, + { + dir: path.Join(restrictedBlocksTestDir, "multiple-restricted"), + }, + } + + rule := NewTerraformDocSamplesRestrictedBlocks() + + for _, tc := range tests { + t.Run(tc.dir, func(t *testing.T) { + ruleTest(t, rule, tc) + }) + } +} diff --git a/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_resources.go b/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_resources.go new file mode 100644 index 00000000000..d3bc52e81be --- /dev/null +++ b/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_resources.go @@ -0,0 +1,70 @@ +package rules + +import ( + "fmt" + + "github.com/terraform-linters/tflint-plugin-sdk/hclext" + "github.com/terraform-linters/tflint-plugin-sdk/tflint" +) + +// TerraformDocSamplesRestrictedResources checks whether restricted resources are used. +type TerraformDocSamplesRestrictedResources struct { + tflint.DefaultRule +} + +// NewTerraformDocSamplesRestrictedResources returns a new rule. 
+func NewTerraformDocSamplesRestrictedResources() *TerraformDocSamplesRestrictedResources { + return &TerraformDocSamplesRestrictedResources{} +} + +// Name returns the rule name. +func (r *TerraformDocSamplesRestrictedResources) Name() string { + return "terraform_doc_sample_restricted_resources" +} + +// Enabled returns whether the rule is enabled by default. +func (r *TerraformDocSamplesRestrictedResources) Enabled() bool { + return false +} + +// Severity returns the rule severity. +func (r *TerraformDocSamplesRestrictedResources) Severity() tflint.Severity { + return tflint.ERROR +} + +// Link returns the rule reference link +func (r *TerraformDocSamplesRestrictedResources) Link() string { + return "https://googlecloudplatform.github.io/samples-style-guide/#language-specific" +} + +const ( + nullResource = "null_resource" +) + +var restrictedResources = []string{nullResource} + +// Check checks whether config contains restricted resource types. +func (r *TerraformDocSamplesRestrictedResources) Check(runner tflint.Runner) error { + path, err := runner.GetModulePath() + if err != nil { + return err + } + if !path.IsRoot() { + // Each sample must be a root module. 
+ return nil + } + + for _, restrictedResource := range restrictedResources { + content, err := runner.GetResourceContent(restrictedResource, &hclext.BodySchema{}, nil) + if err != nil { + return err + } + for _, b := range content.Blocks { + err := runner.EmitIssue(r, fmt.Sprintf("doc sample restricted resource type: %s", restrictedResource), b.DefRange) + if err != nil { + return err + } + } + } + return nil +} diff --git a/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_resources_test.go b/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_resources_test.go new file mode 100644 index 00000000000..07652540c3d --- /dev/null +++ b/tflint-ruleset-blueprint/rules/terraform_doc_sample_restricted_resources_test.go @@ -0,0 +1,32 @@ +package rules + +import ( + "path" + "testing" +) + +const ( + restrictedResourcesTestDir = "doc-sample-restricted-resources" +) + +func TestRestrictedResources(t *testing.T) { + tests := []ruleTC{ + { + dir: path.Join(restrictedResourcesTestDir, "valid"), + }, + { + dir: path.Join(restrictedResourcesTestDir, "single-invalid"), + }, + { + dir: path.Join(restrictedResourcesTestDir, "multiple-invalid"), + }, + } + + rule := NewTerraformDocSamplesRestrictedResources() + + for _, tc := range tests { + t.Run(tc.dir, func(t *testing.T) { + ruleTest(t, rule, tc) + }) + } +} diff --git a/tflint-ruleset-blueprint/rules/terraform_required_version_range.go b/tflint-ruleset-blueprint/rules/terraform_required_version_range.go new file mode 100644 index 00000000000..066fe15f7e7 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/terraform_required_version_range.go @@ -0,0 +1,175 @@ +package rules + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/terraform-linters/tflint-plugin-sdk/hclext" + "github.com/terraform-linters/tflint-plugin-sdk/logger" + "github.com/terraform-linters/tflint-plugin-sdk/tflint" +) + +// TerraformRequiredVersionRange checks 
if a module has a terraform required_version within valid range. +type TerraformRequiredVersionRange struct { + tflint.DefaultRule +} + +// TerraformRequiredVersionRangeConfig is a config of TerraformRequiredVersionRange +type TerraformRequiredVersionRangeConfig struct { + MinVersion string `hclext:"min_version,optional"` + MaxVersion string `hclext:"max_version,optional"` +} + +// NewTerraformRequiredVersionRange returns a new rule. +func NewTerraformRequiredVersionRange() *TerraformRequiredVersionRange { + return &TerraformRequiredVersionRange{} +} + +// Name returns the rule name. +func (r *TerraformRequiredVersionRange) Name() string { + return "terraform_required_version_range" +} + +// Enabled returns whether the rule is enabled by default. +func (r *TerraformRequiredVersionRange) Enabled() bool { + return false +} + +// Severity returns the rule severity. +func (r *TerraformRequiredVersionRange) Severity() tflint.Severity { + return tflint.ERROR +} + +// Link returns the rule reference link +func (r *TerraformRequiredVersionRange) Link() string { + return "https://googlecloudplatform.github.io/samples-style-guide/#language-specific" +} + +const ( + minimumTerraformRequiredVersionRange = "1.3" + maximumTerraformRequiredVersionRange = "1.5" +) + +// Checks if a module has a terraform required_version within valid range. 
+func (r *TerraformRequiredVersionRange) Check(runner tflint.Runner) error { + config := &TerraformRequiredVersionRangeConfig{} + if err := runner.DecodeRuleConfig(r.Name(), config); err != nil { + return err + } + + minVersion := minimumTerraformRequiredVersionRange + if config.MinVersion != "" { + if _, err := version.NewSemver(config.MinVersion); err != nil { + return err + } + minVersion = config.MinVersion + } + + maxVersion := maximumTerraformRequiredVersionRange + if config.MaxVersion != "" { + if _, err := version.NewSemver(config.MaxVersion); err != nil { + return err + } + maxVersion = config.MaxVersion + } + + logger.Info(fmt.Sprintf("Running with min_version: %q max_version: %q", minVersion, maxVersion)) + + splitVersion := strings.Split(minVersion, ".") + majorVersion, err := strconv.Atoi(splitVersion[0]) + if err != nil { + return err + } + minorVersion, err := strconv.Atoi(splitVersion[1]) + if err != nil { + return err + } + + var terraform_below_minimum_required_version string + if minorVersion > 0 { + terraform_below_minimum_required_version = fmt.Sprintf( + "v%d.%d.999", + majorVersion, + minorVersion - 1, + ) + } else { + if majorVersion == 0 { + return fmt.Errorf("Error: minimum version test constraint would be below zero: v%d.%d.999", majorVersion - 1, 999) + } + terraform_below_minimum_required_version = fmt.Sprintf( + "v%d.%d.999", + majorVersion - 1, + 999, + ) + } + + below_required_version, err := version.NewVersion(terraform_below_minimum_required_version) + if err != nil { + return err + } + + minimum_required_version, err := version.NewVersion(minVersion) + if err != nil { + return err + } + + maximum_required_version, err := version.NewVersion(maxVersion) + if err != nil { + return err + } + + path, err := runner.GetModulePath() + if err != nil { + return err + } + + if !path.IsRoot() { + return nil + } + + content, err := runner.GetModuleContent(&hclext.BodySchema{ + Blocks: []hclext.BlockSchema{ + { + Type: "terraform", + Body: 
&hclext.BodySchema{ + Attributes: []hclext.AttributeSchema{{Name: "required_version"}}, + }, + }, + }, + }, &tflint.GetModuleContentOption{ExpandMode: tflint.ExpandModeNone}) + if err != nil { + return err + } + + for _, block := range content.Blocks { + requiredVersion, exists := block.Body.Attributes["required_version"] + if !exists { + logger.Info(fmt.Sprintf("terraform block does not contain required_version: %s", block.DefRange)) + continue + } + + var raw_terraform_required_version string + diags := gohcl.DecodeExpression(requiredVersion.Expr, nil, &raw_terraform_required_version) + if diags.HasErrors() { + return fmt.Errorf("failed to decode terraform block required_version: %v", diags.Error()) + } + + constraints, err := version.NewConstraint(raw_terraform_required_version) + if err != nil { + return err + } + + if !((constraints.Check(minimum_required_version) || constraints.Check(maximum_required_version)) && !constraints.Check(below_required_version)) { + //TODO: use EmitIssueWithFix() + err := runner.EmitIssue(r, fmt.Sprintf("required_version is not inclusive of the the minimum %q and maximum %q terraform required_version: %q", minVersion, maxVersion, constraints.String()), block.DefRange) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/tflint-ruleset-blueprint/rules/terraform_required_version_range_test.go b/tflint-ruleset-blueprint/rules/terraform_required_version_range_test.go new file mode 100644 index 00000000000..120896387f6 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/terraform_required_version_range_test.go @@ -0,0 +1,38 @@ +package rules + +import ( + "path" + "testing" +) + +const ( + TerraformRequiredVersionRangeTestDir = "terraform_required_version_range" +) + +func TestTerraformMinimumRequiredVersionRange(t *testing.T) { + tests := []ruleTC{ + { + dir: path.Join(TerraformRequiredVersionRangeTestDir, "multiple-valid"), + }, + { + dir: path.Join(TerraformRequiredVersionRangeTestDir, "multiple-invalid"), + }, + 
{ + dir: path.Join(TerraformRequiredVersionRangeTestDir, "multiple-valid-config"), + }, + { + dir: path.Join(TerraformRequiredVersionRangeTestDir, "multiple-invalid-config"), + }, + { + dir: path.Join(TerraformRequiredVersionRangeTestDir, "multiple-valid-config-single"), + }, + } + + rule := NewTerraformRequiredVersionRange() + + for _, tc := range tests { + t.Run(tc.dir, func(t *testing.T) { + ruleTest(t, rule, tc) + }) + } +} diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/multiple-restricted/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/multiple-restricted/.expected/issues.json new file mode 100644 index 00000000000..85f84bad5c0 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/multiple-restricted/.expected/issues.json @@ -0,0 +1,70 @@ +[ + { + "Rule": null, + "Message": "doc sample restricted block type module", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 22, + "Column": 1, + "Byte": 669 + }, + "End": { + "Line": 22, + "Column": 18, + "Byte": 686 + } + } + }, + { + "Rule": null, + "Message": "doc sample restricted block type variable", + "Range": { + "Filename": "variables.tf", + "Start": { + "Line": 1, + "Column": 1, + "Byte": 0 + }, + "End": { + "Line": 1, + "Column": 22, + "Byte": 21 + } + } + }, + { + "Rule": null, + "Message": "doc sample restricted block type variable", + "Range": { + "Filename": "variables.tf", + "Start": { + "Line": 5, + "Column": 1, + "Byte": 30 + }, + "End": { + "Line": 5, + "Column": 22, + "Byte": 51 + } + } + }, + { + "Rule": null, + "Message": "doc sample restricted block type variable", + "Range": { + "Filename": "variables.tf", + "Start": { + "Line": 9, + "Column": 1, + "Byte": 60 + }, + "End": { + "Line": 9, + "Column": 16, + "Byte": 75 + } + } + } +] diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/multiple-restricted/main.tf 
b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/multiple-restricted/main.tf new file mode 100644 index 00000000000..45c98436b9f --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/multiple-restricted/main.tf @@ -0,0 +1,52 @@ +# https://github.com/terraform-google-modules/terraform-docs-samples/blob/main/bigquery/bigquery_create_view/main.tf + +# [START bigquery_create_view] +resource "google_bigquery_dataset" "default" { + dataset_id = var.dataset_id + default_partition_expiration_ms = var.expr + default_table_expiration_ms = 31536000000 # 365 days + description = "dataset description" + location = "US" + max_time_travel_hours = 96 # 4 days + + labels = { + billing_group = "accounting", + pii = "sensitive" + } +} + +output "creation_time" { + value = google_bigquery_dataset.default.creation_time +} + +module "bigquery" { + source = "terraform-google-modules/bigquery/google" + version = "~> 7.0" + + dataset_id = "foo" + dataset_name = "foo" + description = "some description" + project_id = var.project_id + location = "US" + delete_contents_on_destroy = true + tables = [ + { + table_id = "bar", + time_partitioning = null, + range_partitioning = null, + expiration_time = 2524604400000, # 2050/01/01 + clustering = [], + labels = { + env = "devops" + billable = "true" + owner = "joedoe" + }, + } + ] + dataset_labels = { + env = "dev" + billable = "true" + owner = "janesmith" + } +} +# [END bigquery_create_view] diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/multiple-restricted/variables.tf b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/multiple-restricted/variables.tf new file mode 100644 index 00000000000..2041b8eca8c --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/multiple-restricted/variables.tf @@ -0,0 +1,11 @@ +variable "project_id" { + +} + +variable "dataset_id" { + +} + +variable "expr" { + default = 123 
+} diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/single-restricted/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/single-restricted/.expected/issues.json new file mode 100644 index 00000000000..ba172f9ab9a --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/single-restricted/.expected/issues.json @@ -0,0 +1,19 @@ +[ + { + "Rule": null, + "Message": "doc sample restricted block type variable", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 3, + "Column": 1, + "Byte": 118 + }, + "End": { + "Line": 3, + "Column": 22, + "Byte": 139 + } + } + } +] diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/single-restricted/main.tf b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/single-restricted/main.tf new file mode 100644 index 00000000000..3158bf82845 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/single-restricted/main.tf @@ -0,0 +1,21 @@ +# https://github.com/terraform-google-modules/terraform-docs-samples/blob/main/bigquery/bigquery_create_view/main.tf + +variable "dataset_id" { + +} + +# [START bigquery_create_view] +resource "google_bigquery_dataset" "default" { + dataset_id = var.dataset_id + default_partition_expiration_ms = 2592000000 # 30 days + default_table_expiration_ms = 31536000000 # 365 days + description = "dataset description" + location = "US" + max_time_travel_hours = 96 # 4 days + + labels = { + billing_group = "accounting", + pii = "sensitive" + } +} +# [END bigquery_create_view] diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/valid/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/valid/.expected/issues.json new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ 
b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/valid/.expected/issues.json @@ -0,0 +1 @@ +[] diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/valid/main.tf b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/valid/main.tf new file mode 100644 index 00000000000..357f3a46bd9 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-blocks/valid/main.tf @@ -0,0 +1,29 @@ +# https://github.com/terraform-google-modules/terraform-docs-samples/blob/main/bigquery/bigquery_create_view/main.tf + +# [START bigquery_create_view] +resource "google_bigquery_dataset" "default" { + dataset_id = "mydataset" + default_partition_expiration_ms = 2592000000 # 30 days + default_table_expiration_ms = 31536000000 # 365 days + description = "dataset description" + location = "US" + max_time_travel_hours = 96 # 4 days + + labels = { + billing_group = "accounting", + pii = "sensitive" + } +} + +resource "google_bigquery_table" "default" { + dataset_id = google_bigquery_dataset.default.dataset_id + table_id = "myview" + deletion_protection = false # set to "true" in production + + view { + query = "SELECT global_id, faa_identifier, name, latitude, longitude FROM `bigquery-public-data.faa.us_airports`" + use_legacy_sql = false + } + +} +# [END bigquery_create_view] diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/multiple-invalid/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/multiple-invalid/.expected/issues.json new file mode 100644 index 00000000000..e1de1d34e5b --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/multiple-invalid/.expected/issues.json @@ -0,0 +1,36 @@ +[ + { + "Rule": null, + "Message": "doc sample restricted resource type: null_resource", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 1, + "Column": 1, + "Byte": 0 + }, + "End": { + "Line": 
1, + "Column": 31, + "Byte": 30 + } + } + }, + { + "Rule": null, + "Message": "doc sample restricted resource type: null_resource", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 5, + "Column": 1, + "Byte": 39 + }, + "End": { + "Line": 5, + "Column": 31, + "Byte": 69 + } + } + } +] diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/multiple-invalid/main.tf b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/multiple-invalid/main.tf new file mode 100644 index 00000000000..f2e5c5bf205 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/multiple-invalid/main.tf @@ -0,0 +1,7 @@ +resource "null_resource" "nr1" { + +} + +resource "null_resource" "nr2" { + +} diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/single-invalid/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/single-invalid/.expected/issues.json new file mode 100644 index 00000000000..42ed4bfb621 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/single-invalid/.expected/issues.json @@ -0,0 +1,19 @@ +[ + { + "Rule": null, + "Message": "doc sample restricted resource type: null_resource", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 1, + "Column": 1, + "Byte": 0 + }, + "End": { + "Line": 1, + "Column": 31, + "Byte": 30 + } + } + } +] diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/single-invalid/main.tf b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/single-invalid/main.tf new file mode 100644 index 00000000000..1afe5685bad --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/single-invalid/main.tf @@ -0,0 +1,6 @@ +resource "null_resource" "nr1" { + +} + +resource "random_pet" "p" { +} diff --git 
a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/valid/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/valid/.expected/issues.json new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/valid/.expected/issues.json @@ -0,0 +1 @@ +[] diff --git a/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/valid/main.tf b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/valid/main.tf new file mode 100644 index 00000000000..f677ae6e844 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/doc-sample-restricted-resources/valid/main.tf @@ -0,0 +1,2 @@ +resource "random_pet" "p" { +} diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid-config/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid-config/.expected/issues.json new file mode 100644 index 00000000000..0b2c3ff98ef --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid-config/.expected/issues.json @@ -0,0 +1,128 @@ +[ + { + "Message": "required_version is not inclusive of the the minimum \"1.6\" and maximum \"1.9\" terraform required_version: \">= 1\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 1, + "Column": 1 + }, + "End": { + "Line": 1, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.6\" and maximum \"1.9\" terraform required_version: \">= 1.1\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 5, + "Column": 1 + }, + "End": { + "Line": 5, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.6\" and maximum \"1.9\" terraform required_version: \">= 1.1.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + 
"Line": 9, + "Column": 1 + }, + "End": { + "Line": 9, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.6\" and maximum \"1.9\" terraform required_version: \">=1.1.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 13, + "Column": 1 + }, + "End": { + "Line": 13, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.6\" and maximum \"1.9\" terraform required_version: \">= 1.1.0, < 2.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 17, + "Column": 1 + }, + "End": { + "Line": 17, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.6\" and maximum \"1.9\" terraform required_version: \">=0.13.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 21, + "Column": 1 + }, + "End": { + "Line": 21, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.6\" and maximum \"1.9\" terraform required_version: \"=0.13.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 25, + "Column": 1 + }, + "End": { + "Line": 25, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.6\" and maximum \"1.9\" terraform required_version: \"0.13.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 29, + "Column": 1 + }, + "End": { + "Line": 29, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.6\" and maximum \"1.9\" terraform required_version: \"~>0.13.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 33, + "Column": 1 + }, + "End": { + "Line": 33, + "Column": 10 + } + } + } +] diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid-config/.tflint.hcl b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid-config/.tflint.hcl new 
file mode 100644 index 00000000000..5844e3c25ee --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid-config/.tflint.hcl @@ -0,0 +1,5 @@ +rule "terraform_required_version_range" { + enabled = true + min_version = "1.6" + max_version = "1.9" +} diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid-config/main.tf b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid-config/main.tf new file mode 100644 index 00000000000..1a50f6c00f0 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid-config/main.tf @@ -0,0 +1,35 @@ +terraform { + required_version = ">= 1" +} + +terraform { + required_version = ">= 1.1" +} + +terraform { + required_version = ">= 1.1.0" +} + +terraform { + required_version = ">=1.1.0" +} + +terraform { + required_version = ">= 1.1.0, < 2.0" +} + +terraform { + required_version = ">=0.13.0" +} + +terraform { + required_version = "=0.13.0" +} + +terraform { + required_version = "0.13.0" +} + +terraform { + required_version = "~>0.13.0" +} diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid/.expected/issues.json new file mode 100644 index 00000000000..bbc81408b49 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid/.expected/issues.json @@ -0,0 +1,170 @@ +[ + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \">= 1\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 1, + "Column": 1 + }, + "End": { + "Line": 1, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform 
required_version: \">= 1.1\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 5, + "Column": 1 + }, + "End": { + "Line": 5, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \">= 1.1.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 9, + "Column": 1 + }, + "End": { + "Line": 9, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \">=1.1.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 13, + "Column": 1 + }, + "End": { + "Line": 13, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \">= 1.1.0, < 2.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 17, + "Column": 1 + }, + "End": { + "Line": 17, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \">=0.13.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 21, + "Column": 1 + }, + "End": { + "Line": 21, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \"=0.13.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 25, + "Column": 1 + }, + "End": { + "Line": 25, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \"0.13.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 29, + "Column": 1 + }, + "End": { + "Line": 29, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \">= 1.6.0\"", + "Range": { + 
"Filename": "main.tf", + "Start": { + "Line": 33, + "Column": 1 + }, + "End": { + "Line": 33, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \">= 1.6.0, < 2.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 37, + "Column": 1 + }, + "End": { + "Line": 37, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \"~>1.6\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 41, + "Column": 1 + }, + "End": { + "Line": 41, + "Column": 10 + } + } + }, + { + "Message": "required_version is not inclusive of the the minimum \"1.3\" and maximum \"1.5\" terraform required_version: \"~>0.13.0\"", + "Range": { + "Filename": "main.tf", + "Start": { + "Line": 45, + "Column": 1 + }, + "End": { + "Line": 45, + "Column": 10 + } + } + } +] diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid/main.tf b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid/main.tf new file mode 100644 index 00000000000..004cf3e2edd --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-invalid/main.tf @@ -0,0 +1,47 @@ +terraform { + required_version = ">= 1" +} + +terraform { + required_version = ">= 1.1" +} + +terraform { + required_version = ">= 1.1.0" +} + +terraform { + required_version = ">=1.1.0" +} + +terraform { + required_version = ">= 1.1.0, < 2.0" +} + +terraform { + required_version = ">=0.13.0" +} + +terraform { + required_version = "=0.13.0" +} + +terraform { + required_version = "0.13.0" +} + +terraform { + required_version = ">= 1.6.0" +} + +terraform { + required_version = ">= 1.6.0, < 2.0" +} + +terraform { + required_version = "~>1.6" +} + +terraform { + required_version = "~>0.13.0" +} diff --git 
a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config-single/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config-single/.expected/issues.json new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config-single/.expected/issues.json @@ -0,0 +1 @@ +[] diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config-single/.tflint.hcl b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config-single/.tflint.hcl new file mode 100644 index 00000000000..2b138f4a409 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config-single/.tflint.hcl @@ -0,0 +1,4 @@ +rule "terraform_required_version_range" { + enabled = true + max_version = "1.6" +} diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config-single/main.tf b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config-single/main.tf new file mode 100644 index 00000000000..730b2432247 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config-single/main.tf @@ -0,0 +1,11 @@ +terraform { + required_version = ">=1.3" +} + +terraform { + required_version = ">= 1.3, < 2.0" +} + +terraform { + required_version = "~>1.6" +} diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config/.expected/issues.json new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ 
b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config/.expected/issues.json @@ -0,0 +1 @@ +[] diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config/.tflint.hcl b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config/.tflint.hcl new file mode 100644 index 00000000000..5844e3c25ee --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config/.tflint.hcl @@ -0,0 +1,5 @@ +rule "terraform_required_version_range" { + enabled = true + min_version = "1.6" + max_version = "1.9" +} diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config/main.tf b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config/main.tf new file mode 100644 index 00000000000..39548f0e887 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid-config/main.tf @@ -0,0 +1,25 @@ +terraform { + required_version = ">=1.6" +} + +terraform { + required_version = ">= 1.6, < 2.0" +} + +terraform { + required_version = "~>1.6" +} + +terraform { + required_version = "~>1.9" +} + +terraform { + backend "gcs" { + bucket = "UPDATE_ME" + prefix = "UPDATE_ME" + } +} + +terraform { +} diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid/.expected/issues.json b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid/.expected/issues.json new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid/.expected/issues.json @@ -0,0 +1 @@ +[] diff --git a/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid/main.tf 
b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid/main.tf new file mode 100644 index 00000000000..f80548fcffc --- /dev/null +++ b/tflint-ruleset-blueprint/rules/testdata/terraform_required_version_range/multiple-valid/main.tf @@ -0,0 +1,47 @@ +terraform { + required_version = ">=1.3" +} + +terraform { + required_version = ">=1.3.0" +} + +terraform { + required_version = ">=v1.3" +} + +terraform { + required_version = ">=1.4" +} + +terraform { + required_version = ">= 1.3" +} + +terraform { + required_version = ">= v1.3" +} + +terraform { + required_version = ">=1.3, <2.0" +} + +terraform { + required_version = ">= 1.3, < 2.0" +} + +terraform { + required_version = "=1.3" +} + +terraform { + required_version = "1.3" +} + +terraform { + required_version = "~>1.3" +} + +terraform { + required_version = "~>1.5" +} diff --git a/tflint-ruleset-blueprint/rules/utils_test.go b/tflint-ruleset-blueprint/rules/utils_test.go new file mode 100644 index 00000000000..81c35967d76 --- /dev/null +++ b/tflint-ruleset-blueprint/rules/utils_test.go @@ -0,0 +1,138 @@ +package rules + +import ( + "encoding/json" + "io/fs" + "os" + "path" + "path/filepath" + "slices" + "strings" + "testing" + + "github.com/terraform-linters/tflint-plugin-sdk/helper" + "github.com/terraform-linters/tflint-plugin-sdk/tflint" +) + +const ( + testdataDir = "testdata" + expectedSuffix = ".expected" + updateEnvVar = "UPDATE_EXPECTED" + issueFile = "issues.json" +) + +var validExtensions = []string{".tf", ".hcl"} + +// ruleTC is a single rule test case. +type ruleTC struct { + // Dir with root module to be tested. + dir string +} + +// ruleTest tests rule r with test case rt. 
+func ruleTest(t *testing.T, r tflint.Rule, rt ruleTC) { + t.Helper() + + config := configForTest(t, rt.dir) + runner := helper.TestRunner(t, config) + + if err := r.Check(runner); err != nil { + t.Fatalf("rule check: %s", err) + } + + expected := path.Join(testdataDir, rt.dir, expectedSuffix, issueFile) + updateExpected(t, expected, issuesToJSON(t, runner.Issues)) + wantIssues := issuesFromJSON(t, expected) + helper.AssertIssues(t, wantIssues, runner.Issues) +} + +// configForTest returns a map of TF configs paths to data stored in subdir. +// Paths are relative to subdir. +func configForTest(t *testing.T, subdir string) map[string]string { + t.Helper() + + modDir := path.Join(testdataDir, subdir) + configs := map[string]string{} + err := filepath.WalkDir(modDir, func(fp string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + // ignore hidden dirs + if d.IsDir() && strings.HasPrefix(d.Name(), ".") { + return filepath.SkipDir + } + if !d.IsDir() && slices.Contains(validExtensions, path.Ext(fp)) { + relPath, err := filepath.Rel(modDir, fp) + if err != nil { + return err + } + cfg, err := os.ReadFile(fp) + if err != nil { + return err + } + configs[relPath] = string(cfg) + } + return nil + }) + if err != nil { + t.Fatalf("fetching testdata: %v", err) + } + return configs +} + +// issuesFromJSON converts file at fp to a helper issues. +func issuesFromJSON(t *testing.T, fp string) helper.Issues { + t.Helper() + + issues := helper.Issues{} + data, err := os.ReadFile(fp) + if err != nil { + t.Fatalf("reading issues: %v", err) + } + err = json.Unmarshal(data, &issues) + if err != nil { + t.Fatalf("unmarshalling issues: %v", err) + } + return issues +} + +// issuesToJSON marshals issues to json bytes ignoring issue rule. +func issuesToJSON(t *testing.T, issues helper.Issues) []byte { + t.Helper() + + // Workaround for unmarshal error of rule interface. 
+ for _, i := range issues { + i.Rule = nil + } + data, err := json.MarshalIndent(issues, "", " ") + if err != nil { + t.Fatalf("marshalling issues: %v", err) + } + data = append(data, "\n"...) + return data +} + +// UpdateExpected updates expected file at fp with data with update env var is set. +func updateExpected(t *testing.T, fp string, data []byte) { + t.Helper() + + if strings.ToLower(os.Getenv(updateEnvVar)) != "true" { + return + } + err := os.MkdirAll(path.Dir(fp), os.ModePerm) + if err != nil { + t.Fatalf("updating result: %v", err) + } + + if _, err := os.Stat(fp); os.IsNotExist(err) { + _, err := os.Create(fp) + if err != nil { + t.Fatalf("creating %s: %v", fp, err) + } + } + + err = os.WriteFile(fp, data, os.ModePerm) + if err != nil { + t.Fatalf("updating result: %v", err) + } +}