diff --git a/.github/auto-assign-bat.yml b/.github/auto-assign-bat.yml index f61b0b51a..d7a432d1b 100644 --- a/.github/auto-assign-bat.yml +++ b/.github/auto-assign-bat.yml @@ -8,4 +8,4 @@ filterLabels: assignees: - ekcasey - hone -- samj1912 +- sambhav diff --git a/.github/auto-assign-learning.yml b/.github/auto-assign-learning.yml index ca2e089f1..08e981ca5 100644 --- a/.github/auto-assign-learning.yml +++ b/.github/auto-assign-learning.yml @@ -8,4 +8,3 @@ filterLabels: assignees: - jkutner - jromero -- samj1912 diff --git a/.github/workflows/auto-assign-maintainer.yml b/.github/workflows/auto-assign-maintainer.yml index 80b2ec846..292bae4e7 100644 --- a/.github/workflows/auto-assign-maintainer.yml +++ b/.github/workflows/auto-assign-maintainer.yml @@ -14,7 +14,7 @@ jobs: runs-on: - ubuntu-latest steps: - - uses: kentaro-m/auto-assign-action@v1.2.4 + - uses: kentaro-m/auto-assign-action@v1.2.5 with: configuration-path: .github/auto-assign-core.yml distribution: @@ -23,7 +23,7 @@ jobs: runs-on: - ubuntu-latest steps: - - uses: kentaro-m/auto-assign-action@v1.2.4 + - uses: kentaro-m/auto-assign-action@v1.2.5 with: configuration-path: .github/auto-assign-distribution.yml implementation: @@ -32,7 +32,7 @@ jobs: runs-on: - ubuntu-latest steps: - - uses: kentaro-m/auto-assign-action@v1.2.4 + - uses: kentaro-m/auto-assign-action@v1.2.5 with: configuration-path: .github/auto-assign-implementation.yml learning: @@ -41,7 +41,7 @@ jobs: runs-on: - ubuntu-latest steps: - - uses: kentaro-m/auto-assign-action@v1.2.4 + - uses: kentaro-m/auto-assign-action@v1.2.5 with: configuration-path: .github/auto-assign-learning.yml platform: @@ -50,7 +50,7 @@ jobs: runs-on: - ubuntu-latest steps: - - uses: kentaro-m/auto-assign-action@v1.2.4 + - uses: kentaro-m/auto-assign-action@v1.2.5 with: configuration-path: .github/auto-assign-platform.yml bat: @@ -59,6 +59,6 @@ jobs: runs-on: - ubuntu-latest steps: - - uses: kentaro-m/auto-assign-action@v1.2.4 + - uses: 
kentaro-m/auto-assign-action@v1.2.5 with: configuration-path: .github/auto-assign-bat.yml diff --git a/.github/workflows/issues-generation.yml b/.github/workflows/issues-generation.yml index a66230ea9..092f5af1a 100644 --- a/.github/workflows/issues-generation.yml +++ b/.github/workflows/issues-generation.yml @@ -15,7 +15,7 @@ jobs: if: ${{ github.event.issue.pull_request || github.event.pull_request }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Run Issue Generation uses: jromero/issue-generation-action@v1.0.0-beta.4 id: issues-generation diff --git a/README.md b/README.md index 5692aae70..919f58676 100644 --- a/README.md +++ b/README.md @@ -45,12 +45,13 @@ Once an RFC has been accepted, the sub-team maintainers should: Once an `issues-created/` label has been created for each sub-team, the RFC is ready to merge. The team member who merges the pull request should do the following: 1. Assign an id based off the pull request number. -1. Rename the file based off the ID inside `text/`. -1. Fill in the remaining metadata at the top. -1. Commit everything. -1. Update issues with RFC ID and a link to the text file. -1. Update any links in PR description to point at the committed file. -1. Remove the "Final Comment Period" label. +2. Rename the file based off the ID inside `text/`. +3. Fill in the remaining metadata at the top. +4. Commit everything. +5. Update issues with RFC ID and a link to the text file. +6. Update any links in PR description to point at the committed file. +7. Remove the "status/voting" label. +8. Create a [tracking issue](https://github.com/buildpacks/rfcs/issues/new?assignees=&labels=type%2Ftracking&template=tracking.md&title=%5BRFC+%23%3CINSERT+RFC+NUMBER+-+e.g.%2C+0099%3E%5D+-+%3CINSERT+RFC+TITLE%3E). ## Automation @@ -60,4 +61,6 @@ The `merge-rfc.sh` script automates several steps of the merge process for accep ``` Each `` should be of the form `/#` (e.g. `buildpacks/spec#1`). 
In the rare case that no work must be done in the project as a result of the RFC pass the `-n` flag to explicitly indicate that no issues should be linked. -After running the `merge-rfc.sh` script, manually verify the output before pushing changes. +After running the `merge-rfc.sh` script: +* Manually verify the output before pushing changes. +* Create a [tracking issue](https://github.com/buildpacks/rfcs/issues/new?assignees=&labels=type%2Ftracking&template=tracking.md&title=%5BRFC+%23%3CINSERT+RFC+NUMBER+-+e.g.%2C+0099%3E%5D+-+%3CINSERT+RFC+TITLE%3E). diff --git a/merge-rfc.sh b/merge-rfc.sh index 8f90848f5..f2c19b34c 100755 --- a/merge-rfc.sh +++ b/merge-rfc.sh @@ -50,13 +50,6 @@ require_command() { require_command git require_command jq -require_command issues-generation - -if [[ -z "${GITHUB_TOKEN:-}" ]]; then - require_command op - echo "> Pulling GitHub token from vault..." - GITHUB_TOKEN=$(op read op://Shared/7xorpxvz3je3vozqg3fy3wrcg4/credential --account buildpacks) -fi #### # INPUTS / VALIDATION @@ -107,25 +100,13 @@ fi RFC_ID=$(generate_id) echo "> Generated RFC number: ${RFC_ID}" -echo "> Creating issues for PR#${PR_NUMBER}" -export GITHUB_TOKEN - -issues-generation create --pr "${OWNER}/${REPO}#${PR_NUMBER}" --bot $BOT_USERNAME --prepend "[RFC #${RFC_ID}] " -ISSUES_TO_LINK=$(issues-generation list --pr "${OWNER}/${REPO}#${PR_NUMBER}" --bot $BOT_USERNAME --json | jq -r '[.[] | select(.num) | .repo + "#" + (.num|tostring) ] | join(" ")') - -for ISSUE in ${ISSUES_TO_LINK}; do - if [[ $ISSUES_TEXT == "N/A" ]]; then - ISSUES_TEXT=$(link_issue "$ISSUE") - else - ISSUES_TEXT+=", $(link_issue "$ISSUE")" - fi -done - - if [[ $NO_ISSUES = false && $ISSUES_TEXT == "N/A" ]]; then - echo -e "ERROR! No issues were provided. Are you sure there are no issues that should be linked?" - echo -e "ERROR! 
Either -i or -n is required\n" usage + echo "> Please create an issue by following the link below:" + echo "https://github.com/buildpacks/rfcs/issues/new?assignees=&labels=type%2Ftracking&projects=&template=tracking.md&title=%5BRFC+%23${RFC_ID}%5D+%3C+-+INSERT+RFC+TITLE%3E" + echo "" + read -p "Press Enter to continue" + read -p "Please enter the issue link: " ISSUES_TEXT fi echo "> Pulling latest changes...." diff --git a/text/0041-api-version-compat.md b/text/0041-api-version-compat.md index ae814167f..252935829 100644 --- a/text/0041-api-version-compat.md +++ b/text/0041-api-version-compat.md @@ -43,7 +43,7 @@ The spec will use future `0.x` API releases to include any desired changes with This RFC does not make changes to the API compatibility rules post 1.0. 1.x APIs version are stil assumed to be purely additive. ## Lifecycle -The lifecycle will use the [lifecycle descriptor](https://github.com/buildpacks/rfcs/blob/pack-publish-buildpack/text/0011-lifecycle-descriptor.md) +The lifecycle will use the [lifecycle descriptor](https://github.com/buildpacks/rfcs/blob/main/text/0011-lifecycle-descriptor.md) to indicate the implemented API versions as before. However, the compatibility assumptions will change. It will be assumed that a lifecycle declaring support for the `0.x` version of the API, supports all 0.2-0.x API versions. 
diff --git a/text/0078-group-additions.md b/text/0078-group-additions.md index df01c6354..9eed25872 100644 --- a/text/0078-group-additions.md +++ b/text/0078-group-additions.md @@ -3,7 +3,7 @@ - Name: Group additions to Builder order - Start Date: 2020-12-23 - Author(s): [jkutner](@jkutner) -- Status: Implemented +- Status: Approved - RFC Pull Request: [rfcs#129](https://github.com/buildpacks/rfcs/pull/129) - CNB Pull Request: (leave blank) - CNB Issue: [buildpacks/docs#319](https://github.com/buildpacks/docs/issues/319), [buildpacks/pack#1099](https://github.com/buildpacks/pack/issues/1099), [buildpacks/pack#1100](https://github.com/buildpacks/pack/issues/1100), [buildpacks/spec#195](https://github.com/buildpacks/spec/issues/195) diff --git a/text/0093-remove-shell-processes.md b/text/0093-remove-shell-processes.md index 269fee7c8..9a7f1876f 100644 --- a/text/0093-remove-shell-processes.md +++ b/text/0093-remove-shell-processes.md @@ -108,7 +108,7 @@ Using the new API this process could look like: ``` [[processes]] type = "bash" -command = ["-c", "dotnet", "my-app.dll", "--urls", "http://0.0.0.0:${PORT:-8080}"] +command = ["bash", "-c", "dotnet", "my-app.dll", "--urls", "http://0.0.0.0:${PORT:-8080}"] default = true ``` Things to note: @@ -400,4 +400,4 @@ In addition to the changes described originally in 0093 we'd like some way of ve Why was this amendment necessary? -The RFC text should reflect what was actually implemented / agreed upon to avoid confusion. \ No newline at end of file +The RFC text should reflect what was actually implemented / agreed upon to avoid confusion. 
diff --git a/text/0096-remove-stacks-mixins.md b/text/0096-remove-stacks-mixins.md index 31dee5d68..10a584e5d 100644 --- a/text/0096-remove-stacks-mixins.md +++ b/text/0096-remove-stacks-mixins.md @@ -41,11 +41,11 @@ Instead of a stack ID, runtime and build-time base images must contain the follo - OS (e.g., "linux", `$GOOS`), specified as `os` in the base image `config` - Architecture (e.g., "arm", `$GOARCH`), specified as `architecture` in the base image `config` - Architecture Variant (optional) (e.g., "v6", `$GOARM`), specified as `variant` in the base image `config` -- Distribution (optional) (e.g., "ubuntu", `$ID`), specified as a label `io.buildpacks.distribution.name` -- Version (optional) (e.g., "18.04", `$VERSION_ID`), specified as a label `io.buildpacks.distribution.version` +- Distribution (optional) (e.g., "ubuntu", `$ID`), specified as a label `io.buildpacks.base.distro.name` +- Version (optional) (e.g., "18.04", `$VERSION_ID`), specified as a label `io.buildpacks.base.distro.version` Additionally, the runtime base may contain the following metadata: -- Target ID (optional) (e.g., "minimal"), specified as a label `io.buildpacks.id` +- Target ID (optional) (e.g., "minimal"), specified as a label `io.buildpacks.base.id` OS, Architecture, and Architecture Variant must be valid identifiers as defined in the [OCI Image specification](https://github.com/opencontainers/image-spec/blob/main/config.md). @@ -53,14 +53,14 @@ Target ID is an identifier specified on the runtime base image that must be prov This allows buildpacks to change their behavior if a run image is selected (e.g., distroless) that has special properties outside of OS, architecture, etc. For Linux-based images, each field should be canonicalized against values specified in `/etc/os-release` (`$ID` and `$VERSION_ID`). -The `os.version` field in an base image `config` may contain combined distribution and version information, but it is not used by the lifecycle. 
+The `os.version` field in a base image `config` may contain combined distribution and version information, but it is not used by the lifecycle. For Windows-based images, Distribution should be empty. Version should be the [suggested value of `os.version`](https://github.com/opencontainers/image-spec/blob/main/config.md#properties) in the OCI spec (e.g., `10.0.14393.1066`). The `stacks` list in `buildpack.toml` is replaced by a `targets` list, where each entry corresponds to a different buildpack image that is exported into a [manifest index](https://github.com/opencontainers/image-spec/blob/master/image-index.md). Each entry may contain multiple valid values for Distribution and/or Version, but only a single OS, Architecture, and Variant. -If the `targets` list is empty and `/bin/build` is present, a target with `os = "linux"` and `arch = "x86_64"` is assumed by tools reading `buildpack.toml`. -If the `targets` list is empty and `/bin/build.bat` or `/bin/build.exe` is present, a target with `os = "windows"` and `arch = "x86_64"` is assumed by tools reading `buildpack.toml`. +If the `targets` list is empty and `/bin/build` is present, a target with `os = "linux"` and `arch = "amd64"` is assumed by tools reading `buildpack.toml`. +If the `targets` list is empty and `/bin/build.bat` or `/bin/build.exe` is present, a target with `os = "windows"` and `arch = "amd64"` is assumed by tools reading `buildpack.toml`. App image builds fail if the build image and selected run image have mismatched metadata. We may introduce flags or additional labels to skip this validation (e.g., for cross-compilation or minimal runtime base images). An image without a specified Distribution is compatible with images specifying any Distribution. 
@@ -73,25 +73,25 @@ When an app image is rebased, `rebaser` must fail if the new run image and previ ```toml [[targets]] os = "linux" -arch = "x86_64" +arch = "amd64" [[targets.distributions]] name = "ubuntu" versions = ["18.04", "20.04"] [[targets]] os = "linux" -arch = "x86_64" -[[targets.distributions]] +arch = "amd64" +[[targets.distros]] name = "ubuntu" -versions = ["14.04", "16.04"] +version = "16.04" [[targets]] os = "linux" arch = "arm" variant = "v6" -[[targets.distributions]] +[[targets.distros]] name = "ubuntu" -versions = ["14.04", "16.04"] +version = "16.04" ``` ## Runtime Metadata @@ -152,12 +152,12 @@ If the newly-specified field values are missing, the lifecycle and pack may used ``` config.os = "linux" -config.architecture = "x86_64" -io.buildpacks.distribution.name = "ubuntu" -io.buildpacks.distribution.version = "18.04" +config.architecture = "amd64" +io.buildpacks.base.distro.name = "ubuntu" +io.buildpacks.base.distro.version = "18.04" ``` -Moving forward it's encouraged for buildpack authors to support both `[[stacks]]` and `[[targets]]` sections in `buildpack.toml` for maximum compatibility. In order to ease this process for those using the `io.buildpacks.stacks.bionic`, lifecycle will translate any section that sets this as on of the `stacks`: +Moving forward it's encouraged for buildpack authors to support both `[[stacks]]` and `[[targets]]` sections in `buildpack.toml` for maximum compatibility. In order to ease this process for those using the `io.buildpacks.stacks.bionic`, lifecycle will translate any section that sets this as one of the `stacks`: ```toml [[stacks]] @@ -169,8 +169,8 @@ to ```toml [[targets]] os = "linux" -arch = "x86_64" -[[targets.distributions]] +arch = "amd64" +[[targets.distros]] name = "ubuntu" versions = ["18.04"] ``` @@ -201,3 +201,32 @@ versions = ["18.04"] [spec-changes]: #spec-changes This RFC requires extensive changes to all specifications. 
+ +## Amended +### Summary + +rename x86_64 -> amd64 in keeping with all other usages of arch. descriptors. + +### Motivation + +This is how we do it everywhere else, this is the way. + +## Amended +### Meta +[meta-1]: #meta-1 +- Name: Rename Docker labels and `buildpack.toml` table names +- Start Date: 2024-04-08 +- Author(s): @edmorley +- Amendment Pull Request: [rfcs#310](https://github.com/buildpacks/rfcs/pull/310) + +### Summary + +Changes were made to the Docker label and `buildpack.toml` table names between when this RFC was written and the changes were made to the spec in [spec#365](https://github.com/buildpacks/spec/pull/365), which have now been backported to the RFC: + +- The `io.buildpacks.distributions.*` Docker labels were renamed to `io.buildpacks.base.distro.*`. +- The `io.buildpacks.id` Docker label was renamed to `io.buildpacks.base.id`. +- The `buildpack.toml` table `[[targets.distributions]]` was renamed to `[[targets.distros]]` and the `versions` field within it renamed to `version` (along with its type changing from an array to a string). + +### Motivation + +To prevent use of the wrong Docker label or `buildpack.toml` table names, if users base their implementations on the RFC rather than reading the spec. diff --git a/text/0109-build-config.md b/text/0109-build-config.md index d531e447e..fde9e5e8f 100644 --- a/text/0109-build-config.md +++ b/text/0109-build-config.md @@ -12,7 +12,7 @@ # Summary [summary]: #summary -This RFC proposes an easy way to configure build images to allow specifying a `/cnb/config/env.build` CNB environment directory that allows updating the Buildpack `detect` and `build` environment based on the directory. +This RFC proposes an easy way to configure build images to allow specifying a `/cnb/build-config` CNB environment directory that allows updating the Buildpack `detect` and `build` environment based on the directory. 
# Definitions @@ -40,10 +40,10 @@ The environment variables may ideally also take precendence over any user provid # What it is [what-it-is]: #what-it-is -The RFC proposes the introduction of the following directory `/cnb/config/env.build` in build images. The directory follows the same convention as a `CNB environment directory`. The notable difference is that the environment variables sourced from this directory are applied **AFTER** processing the user-provided platform environment variables i.e. they should have the highest precedence. These variables should be available during both `detect` and `build` phases (and the `generate` phase in the future). +The RFC proposes the introduction of the following directory `/cnb/build-config/env` in build images. The directory follows the same convention as a `CNB environment directory`. The notable difference is that the environment variables sourced from this directory are applied **AFTER** processing the user-provided platform environment variables i.e. they should have the highest precedence. These variables should be available during both `detect` and `build` phases (and the `generate` phase in the future). -The operator can define this directory in the build image under `/cnb/config` or `CNB_CONFIG_DIR` if defined. +The operator can define this directory in the build image under `/cnb/build-config` or `CNB_BUILD_CONFIG_DIR` if defined. # How it Works [how-it-works]: #how-it-works @@ -58,7 +58,7 @@ Final value: `FOO=test` Buildpack value: `FOO=test` -Build config: `FOO.append=another-value, FOO.delim="` +Build config: `FOO.append=another-value, FOO.delim=:` Final value: `FOO=test:another-value` Buildpack value: `FOO=test` @@ -118,5 +118,5 @@ N/A Addition of the definition of the above directory in the Platform specification i.e. 
- -- `CNB_CONFIG_DIR` -- `/cnb/config/env.build` +- `CNB_BUILD_CONFIG_DIR` +- `/cnb/build-config/` diff --git a/text/0115-rebase-immutable-image-ref.md b/text/0115-rebase-immutable-image-ref.md new file mode 100644 index 000000000..901911b49 --- /dev/null +++ b/text/0115-rebase-immutable-image-ref.md @@ -0,0 +1,118 @@ +# Meta +[meta]: #meta +- Name: Rebase by Image Digest Reference +- Start Date: 2022-12-08 +- Author(s): [@joeybrown-sf](https://github.com/joeybrown-sf) +- Status: Implemented +- RFC Pull Request: [rfcs#262](https://github.com/buildpacks/rfcs/pull/262) +- CNB Pull Request: https://github.com/buildpacks/lifecycle/pull/985 +- CNB Issue: [buildpacks/lifecycle#983](https://github.com/buildpacks/lifecycle/issues/983) +- Supersedes: N/A + +# Summary +[summary]: #summary + +Allow passing a digest reference as rebase target. + +# Definitions +[definitions]: #definitions + +An **image reference** refers to either a **tag reference** or **digest reference**. + +A **tag reference** refers to an identifier of form `/:` which locates an image manifest in an [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec/blob/master/spec.md) compliant registry. + +A **digest reference** refers to a [content addressable](https://en.wikipedia.org/wiki/Content-addressable_storage) identifier of form `/@` which locates an image manifest in an [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec/blob/master/spec.md) compliant registry. + + +# Motivation +[motivation]: #motivation + +Enables rebasing by targeting an immutable image digest. There are some scenarios where the **digest reference** is preferred over **tag reference**. + +# What it is +[what-it-is]: #what-it-is + +This is a feature to expand the lifecycle rebase command to allow targeting an image by either `tag` or `digest`. 
+ +Today, `lifecycle` returns the following error when attempting to use a **digest reference**: +``` +ERROR: failed to rebase: failed to write image to the following tags: [localhost:5003/foo/bar@sha256:916a9e100569ee521b86d03b8499b9b93d7d256d6e838868ae720295f2ea2f76: PUT http://localhost:5003/v2/foo/bar/manifests/sha256:916a9e100569ee521b86d03b8499b9b93d7d256d6e838868ae720295f2ea2f76: DIGEST_INVALID: provided digest did not match uploaded content] +``` + +This error could be avoided if digest references were permitted. + +# How it Works +[how-it-works]: #how-it-works + +Today, we can execute rebase by using **tag references** but not **digest references**. + +Here are some examples of valid rebase commands. **Tag** is `latest` if not specified: + +``` +lifecycle rebase my-repo/foo +``` +``` +lifecycle rebase my-repo/foo:latest +``` +``` +lifecycle rebase my-repo/foo:v4 +``` + +It is not currently possible to target an image using a **digest reference**. + +_The proposed feature will provide a mechanism to target an image rebase by tag reference or digest reference._ + +Here is what targeting an image via digest will look like: +``` +lifecycle rebase -previous-image my-repo/foo@sha256:1234 -tag my-repo/foo:rebase my-repo/foo +``` + +- When using a digest reference as the image target, the caller may specify zero or more `` to apply to exported image. If no `tag` is provided, `latest` will be used. +- If `-previous-image` is not provided, it is inferred from the first argument. This is similar behavior to `analyzer`, for instance. + +# Migration +[migration]: #migration + +This is backwards compatible. + +# Drawbacks +[drawbacks]: #drawbacks + +# Alternatives +[alternatives]: #alternatives + +# Prior Art +[prior-art]: #prior-art + +`pack` explicitly does not support this. There is a friendly validation message in `pack`: + +` is not a tag reference` + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + + +# Spec. 
Changes (OPTIONAL) +[spec-changes]: #spec-changes + + +# History +[history]: #history + + diff --git a/text/0116-stop-deleting-cache-images.md b/text/0116-stop-deleting-cache-images.md new file mode 100644 index 000000000..821ca38df --- /dev/null +++ b/text/0116-stop-deleting-cache-images.md @@ -0,0 +1,74 @@ +# Meta +[meta]: #meta +- Name: Stop deleting cache images +- Start Date: 2022-03-31 +- Author(s): jabrown85 +- Status: Approved +- RFC Pull Request: [rfcs#216](https://github.com/buildpacks/rfcs/pull/216) +- CNB Pull Request: (leave blank) +- CNB Issue: N/A +- Supersedes: (put "N/A" unless this replaces an existing RFC, then link to that RFC) + +# Summary +[summary]: #summary + +Lifecycle will no longer try to delete previous cache images when publishing cache images. + +# Definitions +[definitions]: #definitions + +* Cache Image: The cache stored between builds - stored in a registry. +* ECR: Amazon's container registry product + +# Motivation +[motivation]: #motivation + +- Why should we do this? + As discussed in [buildpacks/lifecycle#803](https://github.com/buildpacks/lifecycle/issues/803), some registries (ECR) do not support `DELETE`. For platforms that work exclusively with such registries, the warning output by lifecycle and the time taken to fail is unavoidable and lifecycle is wasting time trying to complete an operation that will never succeed. + +- What use cases does it support? + All platforms that use cache images against registries that do not support delete will no longer see warning messages. + +- What is the expected outcome? + Platforms will need to handle cleanup of their cache images on their own, if they desire. + +# What it is +[what-it-is]: #what-it-is + +Lifecycle will no longer attempt to delete cache images during cache image export. + +# How it Works +[how-it-works]: #how-it-works + +Lifecycle will no longer attempt to delete cache images during cache image export. 
+ +# Migration +[migration]: #migration + +Lifecycle will document this behavior change in Release Notes/Changelog along with the associated Platform API that enables the new behavior. + +# Drawbacks +[drawbacks]: #drawbacks + +Why should we *not* do this? + +Platform authors relying on this behavior will need to take additional measures to ensure cache image cleanup or the destination registry will continue to grow. + +# Alternatives +[alternatives]: #alternatives + +- What other designs have been considered? + * Add regex or configuration to drive registry hosts to ignore during DELETE * + * Stop deleting cache images by default in newer platform API versions, but add a platform-level configuration to enable previous behavior. +- Why is this proposal the best? + * Lifecycle is not currently cleaning up any other resources + * Deleting the cache images can hurt reproducibility + * There are more public registries that don't allow DELETE +- What is the impact of not doing this? + * End users continue seeing warnings the platform can do nothing about. + + +# Prior Art +[prior-art]: #prior-art + +Discussion at [buildpacks/lifecycle#803](https://github.com/buildpacks/lifecycle/issues/803). diff --git a/text/0117-buildpacks-community.md b/text/0117-buildpacks-community.md new file mode 100644 index 000000000..d92c089ab --- /dev/null +++ b/text/0117-buildpacks-community.md @@ -0,0 +1,156 @@ +# Meta +[meta]: #meta +- Name: Buildpacks Community +- Start Date: 2023-01-26 +- Author(s): [@samj1912](https://github.com/samj1912) +- Status: Approved +- RFC Pull Request: [rfcs#273](https://github.com/buildpacks/rfcs/pull/273) +- CNB Pull Request: (leave blank) +- CNB Issue: N/A +- Supersedes: (put "N/A" unless this replaces an existing RFC, then link to that RFC) + +# Summary +[summary]: #summary + +The Buildpacks Community is a vendor-neutral Github organization where trusted community provided Cloud Native Buildpacks tooling, platforms and integrations can live. 
This would provide users a trusted place to search for Buildpack integrations maintained by the community. + + +# Definitions +[definitions]: #definitions + +- **Buildpacks Community** - The Buildpacks Community is a vendor-neutral Github organization where trusted community provided Cloud Native Buildpacks tooling, platforms and integrations can live. +- **Buildpacks Leadership** - The Buildpacks Leadership is a group of trusted individuals who are responsible for the Buildpacks Community. They have the ability to create repositories in the Buildpacks Community and approve new projects to be added to the Buildpacks Community. This will consist of the TOC and Team-leads of the Buildpacks project. +- **Buildpacks TOC** - The Buildpacks TOC is the technical oversight committee for the Buildpacks project. The TOC is responsible for the technical direction of the Buildpacks project. + + +# Motivation +[motivation]: #motivation + +There are two reasons why this community should exist. + +- The Buildpacks Community will allow for the testing of new technologies or the development of integrations in an environment that is more flexible than that of the core Buildpacks organization. This will provide a staging area for integrations that the Buildpacks team deems important but we are not yet ready to commit to long term maintenance. + +- A trusted repository of community integrations will also allow for a trusted source of integrations that solve common yet still relatively niche problems that are not suitable to be added to core Buildpacks organization. This will highlight integrations of high-quality and provide a vendor-neutral umbrella for them to live. They will also benefit from improved CI/CD resources and a common governance model. 
+ +# What it is +[what-it-is]: #what-it-is + + + +For a project to be admitted to the Buildpacks community organization, it must meet the following criteria: + +- The project must be a tooling, platform or integration that is related to Cloud Native Buildpacks. +- The project must be open source and licensed under Apache 2.0. +- It must follow the Cloud Native Computing Foundation Code of Conduct. +- The project must enable DCO signoff for all commits. +- The project must be open to contributions and have a public issue tracker. +- The project must have a governance document that clearly defines the project maintainers and how they are elected. Each project may choose to define their own governance model as long as it is clearly documented and allows for project maintainers to be elected from the community. +- The list of project maintainers must be publicly available and controlled through a Github team. +- The project must use a CODEOWNERS file to define the maintainers for each repository. The CODEOWNERS file should reference the Github team that controls the list of maintainers. +- All project contributors must be members of the Buildpacks community organization. +- The project must be actively maintained (i.e. issues and pull requests must be addressed regularly, approved pull requests must be merged or updated in a timely manner, etc.). +- There should have visible automated testing for all repositories that are part of the project. +- The project maintainers must conform to a set of best effort SLOs around patching critical CVEs when applicable to the project. +- The project should strive have the following community health files: + - CONTRIBUTING.md: A guide to how contributors should submit patches and the expectations around code review. + - DEVELOPMENT.md: A guide to how contributors should develop the project. + - ADOPTERS.md: A list of adopters of the project. + - VERSIONING.md: A guide to how versioning is done for the project. 
+ - RELEASE.md: A guide to how releases are done for the project. + - SECURITY.md: A guide to how security vulnerabilities should be reported. + +This criteria is meant to alleviate the following problems: + +- All projects must meet some testing standard to be trusted in order to ensure that the projects support the latest Buildpacks APIs and are actively maintained. +- All projects must have a clearly defined governance model to ensure that the project maintainers are elected from the community and that the project is open to contributions. +- There must be a defined system in place to reap abandonware. +- If a project maintainers are not making a best effort of patching out or updating vulnerable software then the project as a whole is untrustworthy. + + +# How it Works +[how-it-works]: #how-it-works + +## Project Admission + +A project can be admitted to the Buildpacks community organization by creating a Github issue in the Buildpacks community repository. The issue should contain the following information: + +- Name of the project +- Evidence for the above criteria +- A list of maintainers for the project + +The above information will be structured into an appropriate issue template. The Buildpacks Leadership will review the issue and if the project meets the above criteria, the project will be added to the Buildpacks community organization. The Buildpacks Leadership will assign a team to the project and the team lead of the team will steward the project - i.e., will be responsible for ensuring that the project meets the above criteria. + +Once admitted, the team lead of the steward team will create a Github team for the project and add the project maintainers to the team and mark them as the team maintainers allowing them to add other maintainers. The existing team maintainers of the steward team will be added as maintainers to the project team. 
+ +The team lead will also create a CODEOWNERS file for the project and add the project maintainers as the code owners. + +The project maintainers will be responsible for maintaining the list of project maintainers and ensuring that all project contributors are members of the Buildpacks community organization. + +They will be able to add new Github members to the organization by creating a Github issue in the Buildpacks community [invites repository](https://github.com/buildpacks-community/invites). + +## Project Removal + +In case the project fails to meet the above criteria, the Buildpacks Leadership will work with the project maintainers to address the issues and if the project is still not ready, the project will be archived or removed from the Buildpacks community organization. + +## Project Graduation + +In case a project is deemed to be mature enough to be part of the core Buildpacks organization, the project maintainers can request for the project to be graduated to the core Buildpacks organization via the [Component Contribution RFC](https://github.com/buildpacks/rfcs/blob/main/text/0108-governance-component-maintainer-role.md). The Buildpacks TOC will review the request and if the project meets the criteria for graduation, the project will be moved to the core Buildpacks organization. + + +# Migration +[migration]: #migration + +N/A + +# Drawbacks +[drawbacks]: #drawbacks + + +N/A + +# Alternatives +[alternatives]: #alternatives + +N/A + +# Prior Art +[prior-art]: #prior-art + +- [CNCF Sandbox](https://www.cncf.io/sandbox-projects/) +- [Paketo Community](https://github.com/paketo-buildpacks/rfcs/blob/main/text/0008-paketo-community.md) +- [Argoproj Labs](https://github.com/argoproj-labs) +- [Crossplane Contrib](https://github.com/crossplane-contrib) + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +- The exact project onboarding issue template. 
+
+# History
+[history]: #history
+
+
+
diff --git a/text/0118-2023H1-roadmap.md b/text/0118-2023H1-roadmap.md new file mode 100644 index 000000000..ca582b201 --- /dev/null +++ b/text/0118-2023H1-roadmap.md @@ -0,0 +1,166 @@
+# Meta
+[meta]: #meta
+- Name: 2023H1 Roadmap
+- Start Date: 2023-01-24
+- Author(s): hone
+- Status: Approved
+- RFC Pull Request: [#272](https://github.com/buildpacks/rfcs/pull/272)
+- CNB Pull Request: (leave blank)
+- CNB Issue:
+- Supersedes: (put "N/A" unless this replaces an existing RFC, then link to that RFC)
+
+# Summary
+[summary]: #summary
+
+This RFC details the first half of the 2023 Roadmap as well as changing the roadmaps to twice a year aligning with KubeCon EU and NA (CDD - Conference Driven Development).
+
+# Definitions
+[definitions]: #definitions
+
+N/A
+
+# Motivation
+[motivation]: #motivation
+
+The project has done annual roadmaps in the past, but they haven't been without their challenges. The items range from concrete to large themes. The larger scoped items like "Integration with the Cloud Native Ecosystem" are nebulous and it's not always clear what success looks like at the end of them. The roadmap has felt like a smaller vision document versus a set of prioritized items to be accomplished.
+
+Without clear guidance on how to shape the roadmap, it's also been a challenge to get one out (not that we're doing great this year). We missed 2022's roadmap altogether and didn't publish one until March for 2021.
+
+Once we did publish a roadmap, items wouldn't necessarily move forward even though we committed as a project to work on them. Items didn't always have an owner and there was no skin in the game for suggesting an idea.
+
+In addition, we don't do a good job of reviewing our roadmap whether that's regularly through the year or at the end of the year.
+
+As an incubation level project, there is an opportunity to broadcast announcements at both KubeCon EU and NA.
As a project, we're often caught flatfooted with sharing highlights or announcements during these conferences.
+
+
+# What it is
+[what-it-is]: #what-it-is
+
+## Roadmap Changes
+
+For 2023, I want to propose trying something different this time around. The project will publish two roadmaps, one for each half of the year, aligned with work to be completed for KubeCon EU (2023-04-18) and KubeCon NA (2023-11-6). The goals I'm hoping to see:
+
+- Focus - Decrease the amount we're doing as a project, so we're able to deliver on the things we commit to.
+- Accountability - With that focus, we also need to be accountable for what we're putting on this list.
+- Marketing - Everyone involved works hard to make this project successful. We should take time to celebrate and talk about the work being done.
+
+### Smaller Scope
+
+Going forward, the roadmap will account for 6 months (in this case it's just under 4 months) worth of work. This forces items on the roadmap to be smaller and concrete since they have to be something that can be accomplished in that time frame. While it was nice having the large items, they were hard to execute and it wasn't clear what the finished state looked like. Any larger piece of work will need to be broken down to make it on the roadmap in a 6 month chunk. It's also not too small — some larger chunks of work can still be planned. Having a second roadmap each year also allows us to course correct mid-year.
+
+Not only are individual items smaller, but we as a project should commit to less so they can be accomplished and aren't just a bunch of empty promises. This means hard decisions will need to be made to cut highly requested features from the roadmap.
+
+Since this is an OSS project, things that don't make the roadmap can still be pursued by others and are welcome! With the finite time of maintainers, there still may be limited support depending on the maintainer.
Also, items can bubble up during the next roadmap cycle and people should advocate for them in the GitHub discussion or slack.
+
+### Ownership
+
+In order to ensure things make progress, every item in the roadmap will have an owner from the project leadership team of TOC members or team maintainers. If someone really wants something on there, they will need to volunteer themselves to help keep it on track if no one else will. This will help keep the number of items on the roadmap from ballooning. The owner will also be responsible for keeping everyone up to date. All roadmap items should link to something on GitHub where things can be tracked.
+
+### KubeCon Alignment
+
+As stated above, we'll be making roadmaps with work finishing by a KubeCon event. They're conveniently around 6 months apart give or take a few weeks. As an incubation project, we're able to share project announcements at these events. This will keep the key items we're working on top of mind, and what gets finished can easily be shared. Roadmap items can also make good talk material for the maintainer track.
+
+With how strict travel budgets are in the current economic climate, these events are one of the few times some of us can get together. Nothing can really replace in person conversations and brainstorming. These discussions can feed into the natural recap/review of the conference and as a way to kickstart roadmap review and planning.
+
+## 2023H1 Roadmap
+
+### Release Base Image Extension
+* Owner: @natalieparellano
+* Links: [RFC](https://github.com/buildpacks/rfcs/blob/main/text/0105-dockerfiles.md)
+
+This started out as [Stack Buildpacks](https://github.com/buildpacks/rfcs/blob/main/text/0069-stack-buildpacks.md) and now Dockerfile Extensions. Significant work has already been done on this feature over the last year. This roadmap item is about seeing this work through with releasing phase 3 in both `lifecycle` and `pack`.
+
+### Remove Stacks & Mixins
+* Owner: @jkutner
+* Links: [RFC](https://github.com/buildpacks/rfcs/blob/main/text/0096-remove-stacks-mixins.md)
+
+This RFC was merged in 2021 and is a dependency on Base Image Extensions. In order to get us to 1.0, we'll need to take on some of these painful backwards breaking changes in the best way possible. This work will include the Buildpack & Platform spec changes with support in `lifecycle` and `pack`.
+
+### Execution Environments RFC
+* Owner: @hone
+* Links: [RFC](https://github.com/buildpacks/rfcs/pull/274)
+
+There has long been a desire for "Test" support, but it's never been prioritized even though it's made the roadmap before. Not to be over ambitious, the first step is to get an RFC written and accepted.
+
+### Project Health
+* Owner: @samj1912
+* Links: [Buildpacks Community Organization RFC](https://github.com/buildpacks/rfcs/pull/273)
+
+Like other [CNCF projects](https://github.com/cncf/toc/issues?q=is%3Aissue+sort%3Aupdated-desc+%22health+of%22+-label%3A%22project+onboarding%22+-label%3A%22sandbox%22+), the project has been impacted by the VMware + Broadcom acquisition. The goal of this item is to improve the general health of the project and grow contributors back to our 2020 numbers. This includes every team having an active set of maintainers and contributors, thus removing the TOC needing to step in for platform.
+
+As for concrete items to be accomplished:
+
+* Establish a buildpacks-community to be used as a labs/staging area to help hype up experiments that we would be otherwise wary of investing in and, if they succeed, adopt them in the main buildpacks org.
+* Participate in mentorship programs to grow contributors like [GSoC](https://summerofcode.withgoogle.com/) and [LFX Mentorship](https://lfx.linuxfoundation.org/tools/mentorship/).
+ +### Pack Test Cleaning/Optimizations +* Owner: @dfreilich +* Links: [Pack Pull Request](https://github.com/buildpacks/pack/pull/1498) + +Currently, the pack acceptance tests are very complex for newcomers. In order to help with contributions, we can relax some of these tests. + +# How it Works +[how-it-works]: #how-it-works + +See [What it is](#what-it-is) for the bulk of the details. For implementing this plan: + +* Open a PR against the [community repo](https://github.com/buildpacks/community) replacing the `ROADMAP.md`. +* As part of the regular leadership meetings we will hold check ins. +* After each KubeCon there will be a recap session and kicking off the next roadmap planning. +* In 2024, we will review how this compares to the normal annual roadmap we've traditionally done. + +# Migration +[migration]: #migration + +N/A + +# Drawbacks +[drawbacks]: #drawbacks + +- This will be more work with twice the number of roadmap plannings. +- There will be more overhead to ensure accountability. + +# Alternatives +[alternatives]: #alternatives + +## Do Nothing + +We can continue to do the annual roadmap. This hasn't proved very successful, but we can still take the lessons learned and adjust the current process. + +# Prior Art +[prior-art]: #prior-art + +- [Buildpacks 2021 Roadmap](https://github.com/buildpacks/community/pull/72) +- [Rust 2021 Roadmap](https://blog.rust-lang.org/2020/09/03/Planning-2021-Roadmap.html) +- [TypeScript Roadmap](https://github.com/microsoft/TypeScript/wiki/Roadmap) + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +- How many items should any one person be able to own? + +# Spec. 
Changes (OPTIONAL) +[spec-changes]: #spec-changes + +N/A + +# History +[history]: #history + + diff --git a/text/0119-export-to-oci.md b/text/0119-export-to-oci.md new file mode 100644 index 000000000..2dacbc55e --- /dev/null +++ b/text/0119-export-to-oci.md @@ -0,0 +1,732 @@ +# Meta +[meta]: #meta +- Name: Export to OCI format +- Start Date: 2022-02-22 +- Author(s): Juan Bustamante (@jjbustamante) +- Status: Approved +- RFC Pull Request: [rfcs#203](https://github.com/buildpacks/rfcs/pull/203) +- CNB Pull Request: (leave blank) +- CNB Issue: N/A +- Supersedes: (put "N/A" unless this replaces an existing RFC, then link to that RFC) + +# Summary +[summary]: #summary + +Add the capability to the `Exporter` phase to save the image to disk in [OCI Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format. + +# Definitions +[definitions]: #definitions + +- A [Platform](https://buildpacks.io/docs/concepts/components/platform/) uses a lifecycle, Buildpacks (packaged in a builder), and application source code to produce an OCI image. +- A [Lifecycle](https://buildpacks.io/docs/concepts/components/lifecycle/) orchestrates Buildpacks execution, then assembles the resulting artifacts into a final app image. +- A **Daemon** is a service, popularized by Docker, for downloading container images, and executing and managing containers from those images. +- A **Registry** is a long-running service used for storing and retrieving container images. +- An **image reference** refers to either a tag reference or digest reference. +- A **tag reference** refers to an identifier of form `/:` which locates an image manifest in an [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec/blob/master/spec.md) compliant registry. 
+- A **digest reference** refers to a [content addressable](https://en.wikipedia.org/wiki/Content-addressable_storage) identifier of form `/@` which locates an image manifest in an [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec/blob/master/spec.md) compliant registry. +- A **image Manifest** provides a configuration and set of layers for a single container image for a specific architecture and operating system. +- The **layer diffID** is the hash of the uncompressed layer +- The **layer digest** is the hash of the compressed layer. +- An [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) is the directory structure for OCI content-addressable blobs and [location-addressable](https://en.wikipedia.org/wiki/Content-addressable_storage#Content-addressed_vs._location-addressed) references. + +# Motivation +[motivation]: #motivation + +### Why should we do this? + +Lifecycle translates an application source code into an OCI image, in order to do this, it can be configured to interact with a docker daemon (using `daemon` flag) or with an OCI registry. + +The [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) is the directory structure for OCI content-addressable blobs and [location-addressable](https://en.wikipedia.org/wiki/Content-addressable_storage#Content-addressed_vs._location-addressed) references. + +The current process, executed by the lifecycle, does not take into consideration cases where a platform implementor may require to pass through the inputs or want to save the final application image on disk using [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format. + +### What use cases does it support? + +Exporting to [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) will enable new user's workflows on top of this functionality. 
For example:
+ - It provides a mechanism to reduce the Lifecycle complexity by removing the interaction with the Daemon in the future.
+ - Solve the problem of losing information when the image is saved into the Daemon, keeping the image on disk along with the metadata generated by the Lifecycle. The OCI Image can be used as input for other tools to offer more capabilities to the end users.
+ - This feature will help to unblock use cases like
+   - OCI annotations. See [RFC](https://github.com/buildpacks/rfcs/pull/196)
+   - Cosign integration. See [RFC](https://github.com/buildpacks/rfcs/pull/195)
+   - Export to tarball. See [issue](https://github.com/buildpacks/lifecycle/issues/423)
+
+### What is the expected outcome?
+
+Lifecycle will be capable of exporting the application image to disk in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format. The image saved on disk could have the following considerations:
+
+- The `blobs` directory MAY be missing `base image` or `run image` blobs. These layers may not be needed on disk as they could be already accessible in a blob store.
+- The `blobs` directory SHOULD always have buildpacks generated `blobs`.
+
+
+# What it is
+[what-it-is]: #what-it-is
+
+The proposal is to add a new capability to the lifecycle (enabled by configuration) to resolve any **image reference** (input or output) to a disk location in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format. It means that, instead of interacting with a daemon or registry, lifecycle will interact with the filesystem to read or write any **image reference**.
+
+The target personas affected by this change are:
+
+- **Platform implementors**: they will have to take care of the responsibility of creating a store resource on disk in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format and pass it through the lifecycle during the phases execution.
+
+The process of writing any image on disk in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format could be expensive in terms of hard drive space or IO operation (compressing or uncompressing layers). In order to provide flexibility for the implementation, the `analyzer` or `exporter` binaries only require the *Image Manifest* and the *Image Config* to execute their operations on the previous image and run image; based on this, we propose that the Lifecycle can be configured to work with a partial representation of the images on disk, meaning that some blobs MAY be missing (which is ok according to the [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format). The missing blobs SHOULD be those that are already available in a daemon or registry.
+
+Let's see some examples of the proposed behavior
+
+## Examples
+
+### Requirements
+
+Lifecycle will convert image references into local paths following the defined [rules](#how-to-map-an-image-reference-into-a-path-in-the-layout-repository) and the content must be in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format.
+ +Let's suppose a *platform implementor* creates a directory with the following structure: + +```=shell +layout-repo +└── index.docker.io + ├── cnb + │ ├── my-full-stack-run:bionic + │ │ └── bionic + │ │ └── blobs + │ │ ├── sha256 + │ │ │ ├── 1f59...944a // manifest + │ │ │ ├── 6388...af5a // config + │ │ │ ├── 824b...f984e + │ │ │ ├── f5f7...5b38 + │ │ │ └── 870e...f1b09 + │ │ ├── index.json + │ │ └── oci-layout + │ └── my-partial-stack-run:bionic + │ └── bionic + │ ├── blobs + │ │ └── sha256 + │ │ ├── 1f59...944a // manifest + │ │ └── 6388...af5a // config + │ ├── index.json + │ └── oci-layout + └── bar + └── my-previous-app + └── latest + ├── blobs + │ └── sha256 + │ ├── 4bcd5..x // app image manifest + │ ├── 5f789..d // app image config + │ ├── 624b...f984e // run layer + │ └── 4g234..f // buildpack layer + ├── index.json + └── oci-layout +``` + +The images named **cnb/my-full-stack-run** and **cnb/my-partial-stack-run** represents the same image but the partial one has missing `blobs`, those `blobs` are the layers that are already available in the store from it came from. + +For each example case, I will present two ways of enabling the new capability: + +- Using an environment variables +- Using the new `-layout` and `layout-dir` flags + +In any case the expected output is the same. 
+ +#### Analyze phase + +##### Analyzing run-image full saved on disk + +```=shell +> export CNB_USE_LAYOUT=true +> export CNB_LAYOUT_DIR=/layout-repo +> /cnb/lifecycle/analyzer -run-image cnb/my-full-stack-run:bionic my-app-image + +# OR + +> /cnb/lifecycle/analyzer -layout -layout-dir /layout-repo -run-image cnb/my-full-stack-run:bionic my-app-image +``` + +expected analyzed.toml output + +```=toml +[run-image] + reference = "/layout-repo/index.docker.io/cnb/my-full-stack-run/bionic@sha256:fab3bb83de466ed29d7e9dcfdbee5b5fb2ff90e91bc849af85b261b4c2062a7a" + +``` + +##### Analyzing run-image partial saved on disk + +```=shell +> export CNB_USE_LAYOUT=true +> export CNB_LAYOUT_DIR=/layout-repo +> /cnb/lifecycle/analyzer -run-image cnb/cnb/my-partial-stack-run:bionic my-app-image + +# OR + +> /cnb/lifecycle/analyzer -layout -layout-dir /layout-repo -run-image cnb/cnb/my-partial-stack-run:bionic my-app-image +``` + +expected analyzed.toml output + +```=toml +[run-image] + reference = "/layout-repo/index.docker.io/cnb/my-partial-stack-run@sha256:fab3bb83de466ed29d7e9dcfdbee5b5fb2ff90e91bc849af85b261b4c2062a7a" + +``` + +##### Analyzing previous-image + + ```=shell +> export CNB_USE_LAYOUT=true +> export CNB_LAYOUT_DIR=/layout-repo +> /cnb/lifecycle/analyzer -run-image cnb/my-full-stack-run:bionic -previous-image bar/my-previous-app my-app-image + +# OR + +> /cnb/lifecycle/analyzer -layout -layout-dir /layout-repo -run-image cnb/my-full-stack-run:bionic-previous-image bar/my-previous-app my-app-image +``` + +expected analyzed.toml output + +```=toml +[run-image] + reference = "/layout-repo/index.docker.io/cnb/my-partial-stack-run/bionic@sha256:fab3bb83de466ed29d7e9dcfdbee5b5fb2ff90e91bc849af85b261b4c2062a7a" + +[previous-image] + reference = "/layout-repo/index.docker.io/bar/my-previous-app/latest@sha256:aa0cf7fc8f161bdb96166c1644174affacd70d17f372373ca72c8e91116e2d43" + +``` + +##### Analyzing run-image not saved on disk + +```=shell +> export CNB_USE_LAYOUT=true +> 
export CNB_LAYOUT_DIR=/layout-repo +> /cnb/lifecycle/analyzer -run-image cnb/bad-run-image my-app-image + +# OR + +> /cnb/lifecycle/analyzer -layout -layout-dir /layout-repo -run-image cnb/bad-run-image my-app-image + +# expected output + +ERROR: the run-image could not be found at path: /layout-repo/index.docker.io/cnb/bad-run-image/latest +``` + +##### Analyzing without run-image argument + +```=shell +> export CNB_USE_LAYOUT=true +> export CNB_LAYOUT_DIR=/layout-repo +> /cnb/lifecycle/analyzer my-app-image + +# OR + +> /cnb/lifecycle/analyzer -layout -layout-dir /layout-repo my-app-image + +# expected output + +ERROR: -run-image is required when OCI Layout feature is enabled +``` + +##### Analyzing without layout-dir argument + +```=shell +> export CNB_USE_LAYOUT=true +> /cnb/lifecycle/analyzer -run-image cnb/bad-run-image my-app-image + +# OR + +> /cnb/lifecycle/analyzer -layout -run-image cnb/bad-run-image my-app-image + +# expected output + +ERROR: defining a layout directory is required when OCI Layout feature is enabled. 
Use -layout-dir flag or CNB_LAYOUT_DIR environment variable +``` + +Let's also check some examples when the export phase is executed + +#### Export phase + +##### Export to OCI using run-image full saved on disk + +```=shell +> export CNB_USE_LAYOUT=true +> export CNB_LAYOUT_DIR=/layout-repo +> /cnb/lifecycle/exporter my-app-image + +# OR + +> /cnb/lifecycle/exporter -layout -layout-dir /layout-repo my-app-image +``` + +The output will be written into the repository folder described above and it should looks like this: + +```=shell +layout-repo +└── index.docker.io + ├── cnb + │ └── my-full-stack-run:bionic + │ └── bionic + │ └── blobs + │ ├── sha256 + │ │ ├── 1f59...944a // manifest + │ │ ├── 6388...af5a // config + │ │ ├── 824b...f984e + │ │ ├── f5f7...5b38 + │ │ └── 870e...f1b09 + │ ├── index.json + │ └── oci-layout + └── library + └── my-app-image + └── latest + ├── blobs + │ └── sha256 + │ ├── 1bcd5..x // app image manifest + │ ├── 2f789..d // app image config + │ ├── 824b...f984e // run layer + │ ├── f5f7...5b38 // run layer + │ ├── 870e...f1b09 // run layer + │ └── 3g234..f // buildpack layer + ├── index.json + └── oci-layout + +``` + +As we can see, the application image `my-app-image` contains a **full** copy of the layers in its `blobs` folder. 
+ + +##### Export to OCI using run-image partially saved on disk + +```=shell +> export CNB_USE_LAYOUT=true +> export CNB_LAYOUT_DIR=/layout-repo +> /cnb/lifecycle/exporter my-app-image + +# OR + +> /cnb/lifecycle/exporter -layout -layout-dir /layout-repo my-app-image +``` + +Expected output: + +```=shell +layout-repo +└── index.docker.io + ├── cnb + │ └── my-partial-stack-run:bionic + │ └── bionic + │ ├── blobs + │ │ └── sha256 + │ │ ├── 1f59...944a // manifest + │ │ └── 6388...af5a // config + │ ├── index.json + │ └── oci-layout + └── library + └── my-app-image + └── latest + ├── blobs + │ └── sha256 + │ ├── 1bcd5..x // app image manifest + │ ├── 2f789..d // app image config + │ └── 3g234..f // buildpack layer + ├── index.json + └── oci-layout + +``` + +As we can see, the application image `my-app-image` has missing `blobs` because they were not provided as input and the lifecycle just **skip writing** those layers on disk. + +##### Using -layout flag in combination with --daemon or --publish flags + +Any combination of using multiple sources or sinks in the Lifecycle invocation of phases should throw an error to the user. 
For example: + +```=shell +> export CNB_USE_LAYOUT=true +> export CNB_LAYOUT_DIR=/layout-repo +> /cnb/lifecycle/exporter -daemon -run-image cnb/my-full-stack-run:bionic my-app-image + +# OR + +> /cnb/lifecycle/exporter -layout -layout-dir /layout-repo -daemon -run-image cnb/my-full-stack-run:bionic my-app-image + +ERROR: exporting to multiple targets is unsupported +``` + +# How it Works +[how-it-works]: #how-it-works + +The lifecycle phases affected by this new behavior are: + - [Analyze](https://buildpacks.io/docs/concepts/components/lifecycle/analyze/) + - [Restore](https://buildpacks.io/docs/concepts/components/lifecycle/restore/) + - [Export](https://buildpacks.io/docs/concepts/components/lifecycle/export/) + - [Create](https://buildpacks.io/docs/concepts/components/lifecycle/create/) + +At a high level view the proposed solution can be summarized with the following system landscape diagram from the C4 model + +![](https://i.imgur.com/y972lTD.png) + +Notice that we are relying on the OCI format Specification to expose the data for `Platforms` + +The following new inputs are proposed to be added to these phases + + | Input | Environment Variable | Default Value | Description + |-------|-----------------------|---------------|-------------- + | `` | `CNB_USE_LAYOUT` | false | Enables the capability of resolving image from/to in OCI layout format on disk | + | `` | `CNB_LAYOUT_DIR` | | Path to a directory where the images are saved in OCI layout format| + +## How to map an image reference into a path in the layout repository + +In the previous examples one key element was how to translate an image reference into a path, let's define those rules. + +Considering an **image reference** refers to either a tag reference or digest reference. 
It could have the following formats +- A name reference refers to an identifier of form `//:` +- A digest reference refers to a content addressable identifier of form `//@:` + +The image look up will be done following these rules: + - WHEN `the image points to a name reference` + - Lifecycle will load/save the image from/to disk in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format at `////` + - WHEN `the image points to a digest reference` + - Lifecycle will load the image from disk in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format at `/////` + - WHEN `` is not provided default value will be **index.docker.io** + - IF `` is not also provided, then default value will be **library** + + +## Examples + +In all the examples the new feature is enabled by the use of the new flag `-layout` or by setting +the new environment variable `CNB_USE_LAYOUT` to true. + +Let's review some previous examples + +#### Analyze phase + +##### Analyzing run-image full saved on disk + +Command: + +```=shell +> export CNB_USE_LAYOUT=true +> export CNB_LAYOUT_DIR=/layout-repo +> /cnb/lifecycle/analyzer -run-image cnb/my-full-stack-run:bionic my-app-image + +# OR + +> /cnb/lifecycle/analyzer -layout -layout-dir /layout-repo -run-image cnb/my-full-stack-run:bionic my-app-image +``` + +Arguments received: + + - `run-image`: `cnb/my-full-stack-run:bionic` + - `image`: `my-app-image` + +The `` is set with the value `/layout-repo` + +Lifecycle applies the rules for looking up the images: + - It takes the **tag reference** `cnb/my-full-stack-run:bionic`, applies the conversion rules and gets `/index.docker.io/cnb/my-full-stack-run/bionic` + - It will append the `` at the beginning, getting the following path: `/layout-repo/index.docker.io/cnb/my-full-stack-run/bionic` + - It will look for an image saved on disk in [OCI Image 
Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format at path `/layout-repo/index.docker.io/cnb/my-full-stack-run/bionic`. + - In case of the *application image* it will look at path `/layout-repo/index.docker.io/library/my-app-image/latest` + +Because both images are found, the phase is executed as usual and the `analyzed.toml` file will be updated. The `run-image.reference` added into the `analyzed.toml` will contain the path resolved by the lifecycle plus the digest reference to the image with the following format `[path]@[digest]`. In case of this example, it will look like this: + +```=toml +[run-image] + reference = "/layout-repo/index.docker.io/cnb/my-partial-stack-run/bionic@sha256:fab3bb83de466ed29d7e9dcfdbee5b5fb2ff90e91bc849af85b261b4c2062a7a" +``` + +##### Analyzing run-image partial saved on disk + +Command received: + +```=shell +> export CNB_USE_LAYOUT=true +> /cnb/lifecycle/analyzer -run-image cnb/cnb/my-partial-stack-run:bionic my-app-image + +# OR + +> /cnb/lifecycle/analyzer -layout -run-image cnb/cnb/my-partial-stack-run:bionic my-app-image +``` + +Arguments received: + + - `run-image`: `cnb/my-full-partial-run:bionic` + - `image`: `my-app-image` + +The `` is set with the default value `/layout-repo` + +Noticed the structure of the `run-image` provided + +```=shell +layout-repo +└── index.docker.io + └── cnb + └── my-partial-stack-run:bionic + └── bionic + ├── blobs + │ └── sha256 + │ ├── 1f59...944a // manifest + │ └── 6388...af5a // config + ├── index.json + └── oci-layout +``` + +Similar to the previous example, Lifecycle applies the rules for looking up the images and look at path `/layout-repo/index.docker.io/cnb/my-partial-stack-run/bionic` and it determines a partial image was provided and execute the phase with the information from the **Image Manifest** and the **Image Config** + +The output `analyzed.toml` will also include the new `run-image.reference` field the path and the digest of the run image. 
+ +```=toml +[run-image] + reference = "/layout-repo/index.docker.io/cnb/my-partial-stack-run/bionic@sha256:fab3bb83de466ed29d7e9dcfdbee5b5fb2ff90e91bc849af85b261b4c2062a7a" +``` + +##### Analyzing previous-image + +Command received: + +```=shell +> export CNB_USE_LAYOUT=true +> /cnb/lifecycle/analyzer -run-image cnb/my-full-stack-run:bionic -previous-image bar/my-previous-app my-app-image + +# OR + +> /cnb/lifecycle/analyzer -layout -run-image cnb/my-full-stack-run:bionic -previous-image bar/my-previous-app my-app-image +``` + +Arguments received: + +- `run-image`: `cnb/my-full-stack-run:bionic` +- `previous-image`: `bar/my-previous-app` +- `image`: `my-app-image` + +The `` is set with the default value `/layout-repo` + +`run-image` and `image` arguments are treated in the same way as previous examples, and for `previous-image` argument the looking up images rules are applied and Lifecycle will look at path `/index.docker.io/bar/my-previous-app` for a image in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format. 
+ +The `analyzed.toml` file es expected to be updated with the `previous-image.reference` containing the path and the digest of the `previous-image` + +```=toml +[run-image] + reference = "/layout-repo/index.docker.io/cnb/my-full-stack-run/bionic@sha256:fab3bb83de466ed29d7e9dcfdbee5b5fb2ff90e91bc849af85b261b4c2062a7a" + +[previous-image] + reference = "/layout-repo/index.docker.io/bar/my-previous-app/latest@sha256:aa0cf7fc8f161bdb96166c1644174affacd70d17f372373ca72c8e91116e2d43" + +``` + +Let's check how the `export` examples works on detailed + +##### Export to OCI using run-image full saved on disk + +Pre-conditions: + +The following directories are accessible by the lifecycle +```=shell +/ +├── layout-repo +│ └── index.docker.io +│ └── cnb +│ └── my-full-stack-run:bionic +│ └── bionic +│ └── blobs +│ ├── sha256 +│ │ ├── 1f59...944a // manifest +│ │ ├── 6388...af5a // config +│ │ ├── 824b...f984e +│ │ ├── f5f7...5b38 +│ │ └── 870e...f1b09 +│ ├── index.json +│ └── oci-layout +└── layers + └── analyzed.tom +``` + +The `/layers/analyzed.toml` file contains the following data: + +```=toml +[run-image] + reference = "/layout-repo/index.docker.io/cnb/my-full-stack-run/bionic@sha256:fab3bb83de466ed29d7e9dcfdbee5b5fb2ff90e91bc849af85b261b4c2062a7a" + +``` + +Command executed: + +```=shell +> export CNB_USE_LAYOUT=true +> /cnb/lifecycle/exporter my-app-image + +# OR + +> /cnb/lifecycle/exporter -layout my-app-image +``` + +Arguments received: + +- `image`: `my-app-image` + +The `` is set with the default value `/layout-repo` + +Lifecycle: + - It will read the `[run-image]` section in the `analyzed.toml`, it will parse `reference` attribute using the `@` separator and load the `run-image` image saved on disk in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format at path `/layout-repo/index.docker.io/cnb/my-full-stack-run/bionic`. 
+ - Lifecycle could also validate the digest of the image loaded is the same as the one established by the `reference`. + - Lifecycle will execute the export steps and at the end of the process it will write the *application image* at path `/layout-repo/index.docker.io/library/my-app-image/latest` in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format + + +The output image will be written at: + +```=shell +layout-repo +└── index.docker.io + └── library + └── my-app-image + └── latest + ├── blobs + │ └── sha256 + │ ├── 1bcd5..x // app image manifest + │ ├── 2f789..d // app image config + │ ├── 824b...f984e // run layer + │ ├── f5f7...5b38 // run layer + │ ├── 870e...f1b09 // run layer + │ └── 3g234..f // buildpack layer + ├── index.json + └── oci-layout + +``` + +## Proof of concept + +In order to validate the feasibility of the proposed feature, we developed a proof of concept with one of the most important side effects this capability can add into the project: **Removing the Daemon Support**. You can also check a recording with the demo in the following [link](https://drive.google.com/file/d/1W1125OHuyUlx88BRroUTLBfrFHhFM5A9/view?usp=sharing) + +As mentioned earlier, if we want to remove the daemon support in the Lifecycle, then all the responsibility to deal with it goes into the platforms implementors, that means, for example: +- Pull the require dependencies (runtime image for example), save them on disk in OCI layout format and pass it through the lifecycle using the `` parameter +- Push the application image (exported in OCI layout format) into the Daemon, because that is what users are expecting. + +During the proof of concept implementation I choose to use [skopeo](https://github.com/containers/skopeo) tool to solve the problem of interacting with the Daemon. The reason to do it was **simplicity** for the PoC developed but we believe this is a good subject to talk about with the community. 
+ +The following workflow was developed: +- Pack downloads the [skopeo image](https://github.com/containers/skopeo/blob/main/install.md#container-images), similar to how it downloads the other dependencies (Lifecycle, Buildpacks) +- Pack executes the [skopeo copy](https://github.com/containers/skopeo/blob/main/docs/skopeo-copy.1.md) command in a container + - Copy the image from the Daemon into the filesystem, in OCI layout format, before running Lifecycle + - Copy the image from the filesystem into the Daemon after the export phase was executed + +The following Dynamic Diagram from the C4 model can give a little idea of the pieces implemented during the PoC + +![](https://i.imgur.com/SkY3l62.png) + +### Measuring the performance impact + +In order to have an idea of how much the performance of exporting to the Daemon is affected when using the OCI layout format, the following metrics were taken. + +Using a local workstation with the following specifications: +- **(MacOS 12.3.1 / 2,4 GHz 8-Core Intel Core i9 / 32 GB 2667 MHz DDR4 / 1 TB APFS SSD HD)** + +We built 5 times the Java, Kotlin and Ruby sample codes from our [repository](https://github.com/buildpacks/samples/tree/main/apps) and took the average build time using the Daemon and the OCI layout format approaches. + +The table below summarizes the results we got. + +![](https://i.imgur.com/zuPZ6Xk.png) + +Times are expressed in **seconds** and the first thing we noticed is that for Java and Kotlin the `build time` can be affected by the network and the availability of maven repositories, so I decided to take the `same build time` to compare both approaches. + +Here are my thoughts about these results: +- Java and Kotlin behavior are very similar, exporting only to OCI format increases the time by 5% compared to the Daemon approach, and from the user's perspective it represents an increase of 5 seconds.
+- On the other hand, for the Ruby application, exporting to OCI format represents a 20% increase of the time, but from the user's perspective it is only 1.5 seconds, which is probably difficult to notice. +- When the time spent for Pack to prepare the environment for the lifecycle execution (downloading the run-image from a registry to OCI format) and loading the OCI image from disk to the Daemon (which is the expected behavior from Users) is added, then: + - The Java and Kotlin applications' time increase was **13%**, representing **+13 seconds** from the user's perspective + - The Ruby application's time increases by **82%**, but from the user's side it represents **+7 seconds** + +Let's take a look at what happened when we executed a build for the second time; the table below summarizes the results + +![](https://i.imgur.com/zDAOZU6.png) + +In these cases we can see the behavior is consistent compared with the previous case, the Java and Kotlin applications show a **5% increase** of time, but for the Ruby application, because its process time is smaller, the sensitivity to variation is bigger (23%), although in reality it represents a **+1 second** difference for the User. Also, when the pre and post processing time is added the variations are bigger for all the applications. As mentioned, the [skopeo](https://github.com/containers/skopeo) tool was used here and most of the time spent goes into this category. + +I think this PoC demonstrates that adding the export to OCI layout format is a valuable feature for the project; it opens the door to deprecating the use of the Daemon, but it will require platform implementors to prepare and post-process the output on disk in a smart way to reduce the performance penalties to users. + +# Migration +[migration]: #migration + +## For the scope of this RFC + +- No breaking changes were identified + +# Drawbacks +[drawbacks]: #drawbacks + +- We could increase the disk space if we do not manage the duplication of saving the layers on disk.
The proposal suggests using symbolic links to reference layers on disk and avoid duplication. + +# Alternatives +[alternatives]: #alternatives + +- What other designs have been considered? + - Doing nothing, just keep exporting only to the Daemon or a Registry + +- What other designs have been considered for removing the Daemon support? + - Instead of exporting to OCI layout format, the other approach considered is exporting to [registry only](https://github.com/buildpacks/rfcs/blob/jjbustamante/feature/deprecate-daemon/text/0000-deprecate-daemon.md#lifecycle-registry-only-approach). In this case, the Lifecycle only interacts with registries. + As part of the PoC, I took some metrics to compare the impact of using an ephemeral registry to publish the application image. The strategy used to capture the metrics was: + - I used a script to set up a local [container registry](https://hub.docker.com/_/registry) before executing the `pack build` command + - For the **first build** metrics, the registry was destroyed/re-created before each execution + - The `pack build` command was configured to `--publish` in the local registry + - I didn't use the [skopeo](https://github.com/containers/skopeo) tool in these cases to complete the pushing into the Daemon + + Here are the results: + + ![](https://i.imgur.com/vtOjxJP.png) + + - The results are actually very similar to exporting to OCI layout format for Java and Kotlin, but the Ruby application is actually worse. + + ![](https://i.imgur.com/FfbqfF6.png) + + - The second build is actually better compared with the export to OCI on disk; Java and Kotlin increase the time just by **2%**, but Ruby again is worse + + Some thoughts about these approaches + + - **Process Management:** Platforms must now manage a parallel process (registry in the daemon). This would entail ensuring that the registry is started and cleaned up appropriately. + - **Networking:** There are additional network complications in order to route images to the ephemeral registry.
For example, [network drivers](https://docs.docker.com/network/#network-drivers), [proxy](https://docs.docker.com/desktop/networking/#httphttps-proxy-support) and [DNS configuration](https://docs.docker.com/config/containers/container-networking/#dns-services), [host name resolution](https://docs.docker.com/desktop/networking/#i-want-to-connect-from-a-container-to-a-service-on-the-host), and [TLS certificates](https://betterprogramming.pub/deploy-a-docker-registry-using-tls-and-htpasswd-56dd57a1215a) to name a few. + +- Why is this proposal the best? [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format is a standard from which other tools can create an [OCI Runtime Specification bundle](https://github.com/opencontainers/runtime-spec/blob/v1.0.0/bundle.md). Exporting to this format enables Platforms to implement any feature on top of it; for example, exporting to [containerd](https://containerd.io) has been [requested](https://github.com/buildpacks/lifecycle/issues/829) by the community and it could be implemented if we can get the application image exported in [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/main/image-layout.md) format. +- What is the impact of not doing this? We will probably never remove the Daemon support in the Lifecycle + +# Prior Art +[prior-art]: #prior-art + +- Discussion around removing the Daemon support [RFC](https://github.com/buildpacks/rfcs/blob/jjbustamante/feature/deprecate-daemon/text/0000-deprecate-daemon.md) + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +- What parts of the design do you expect to be resolved before this gets merged? + - Tools like [umoci](https://umo.ci/), used to create a runtime bundle from an image in OCI layout format, require the [annotation](https://github.com/opencontainers/image-spec/blob/main/annotations.md#pre-defined-annotation-keys) `org.opencontainers.image.ref.name` to be present.
Also, tools like [skopeo](https://github.com/containers/skopeo) include the annotation when an image is copied in OCI format. + We are not adding the annotation as part of the Buildpacks Specification, but in this case this could make our output incompatible with some other tooling. + - **Answer:** we agreed on adding the `org.opencontainers.image.ref.name` annotation + - Can exporting to a tarball be handled in this RFC, or must a new one be created? + - **Answer:** this can be handled in a different RFC + +- What parts of the design do you expect to be resolved through implementation of the feature? + - Handle symbolic links to the blobs in the `` repository, this could be more efficient on hard drive space + - **Answer:** this can be handled on the implementation side + + +# Spec. Changes (OPTIONAL) +[spec-changes]: #spec-changes + +The [Platform Interface Specification](https://github.com/buildpacks/spec/blob/platform/0.11/platform.md#inputs-5) must be updated to include the following inputs to the [Create](https://buildpacks.io/docs/concepts/components/lifecycle/create/), [Analyze](https://buildpacks.io/docs/concepts/components/lifecycle/analyze/) and [Export](https://buildpacks.io/docs/concepts/components/lifecycle/export/) phases + +| Input | Environment Variable | Default Value | Description| +|-------|-----------------------|---------------|------------| +| `` | `CNB_USE_LAYOUT` | false | Enables the capability of resolving image from/to in OCI layout format on disk | +| `` | `CNB_LAYOUT_DIR` | | Path to a directory where the images are saved in OCI layout format| + +Also the `analyzed.toml` [file](https://github.com/buildpacks/spec/blob/platform/0.11/platform.md#analyzedtoml-toml) will be updated to include the `reference` format in case layout is being used.
+ +```=toml +[image] + reference = "" + +[run-image] + reference = "" + +[previous-image] + reference = "" +``` + +Where + +- `[image|run-image|previous-image].reference` MUST be either: + - A digest reference to an image in an OCI registry + - The ID of an image in a docker daemon + - The path to an image in OCI layout format + diff --git a/text/0120-cvebackports.md b/text/0120-cvebackports.md new file mode 100644 index 000000000..1d20a99ff --- /dev/null +++ b/text/0120-cvebackports.md @@ -0,0 +1,121 @@ +# Meta +[meta]: #meta +- Name: CVE discretional Patching +- Start Date: 2023-03-08 +- Author(s): joe-kimmel-vmw, natalieparellano +- Status: Approved +- RFC Pull Request: [rfcs#281](https://github.com/buildpacks/rfcs/pull/281) +- CNB Pull Request: (leave blank) +- CNB Issue: N/A +- Supersedes: N/A + +# Summary +[summary]: #summary +This RFC describes how maintainers MAY issue patch releases in response to critical and high severity CVEs being detected in past or current releases of the lifecycle binary. + +# Definitions +[definitions]: #definitions +CVE - Literally expands to “Common Vulnerabilities and Exposures” but in general it refers to a security gap which could be exploited by a malicious attacker, which can be fixed by patching a single component. + +CVE Severity: CVEs are announced with a severity score. While this score can vary between vendors, it is made by considering many factors including how easy it is to take advantage of the exploit, what resources are exposed by the exploit, and whether there is already a known exploit in circulation. See https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator + +Critical and High are the two highest severity ratings available under the current severity rating system. + + +# Motivation +[motivation]: #motivation + +- Why should we do this? + +Patching CVEs is industry best practice.
By providing patched updates of recent releases we enable CNB-using organizations with low risk tolerance or high upgrade friction to make a minimal change to their runtime infrastructure that still keeps secure against known vulnerabilities. +Following patch version line architecture also boosts user trust vs. a pledge to roll forward security fixes to higher version lines with the promise of no breaking changes, which would be looked upon skeptically by some enterprise users. Those same users who are skeptical of any & all promises that new versions won’t have any regressions are especially likely to be running older versions in the wild, thus providing patches to older versions ensures that they can continue to use CNB lifecycle without risking exposure to the riskiest CVEs. + +- What use cases does it support? + +This is especially important for users who do not use CNB as part of a SaaS product, i.e. for “on-prem” deployments. These on-prem users are pulling down the provided lifecycle images and running the binaries on their own infrastructure, thus increasing their potential risk exposure and liability. + +Additionally, as we do have concrete plans to deprecate platform API versions in the lifecycle this calendar year, there's increased likelihood to learn of +other lifecycle consumers who are not ready to upgrade and who would appreciate patch releases. + +- What is the expected outcome? + +Maintainers MAY issue patch releases in response to critical and high CVEs. Most importantly, users MAY upgrade to consume these patch releases in a timely manner if they are not comfortable consuming the latest version. + +# What it is +[what-it-is]: #what-it-is + +It is risk mitigation for consumer and enterprise customers who want to apply critical patches without any other changes that would come with a minor version upgrade. 
+ +# How it Works +[how-it-works]: #how-it-works + +Patch releases will be published at the discretion of the maintainers in response to Critical and High CVEs. + +The lifecycle will still offer the same strong backwards compatibility guarantees as ever. + +Existing process (patch most recent version N until it becomes N-1) bumps dependencies and the go version each month irrespective of CVEs being present. This proposal does not involve changing that process. + +# Migration +[migration]: #migration + +N/A + +# Drawbacks +[drawbacks]: #drawbacks + +Maintaining past releases does take time. However under this proposal that maintenance is optional and performed at the discretion of the maintainers. + +For a vague guess at the volume of this work: In the year from March 2022 March 2023, the grype scanner found 3 High and 0 Critical CVEs in the 0.13.5 lifecycle binary, so there would have been at most 3 additional patch releases of that line. +Similarly, grype scanner found 1 High and 0 Critical CVEs in the 0.14.3 lifecycle image (from October 2022), and 0 High or Critical CVEs in the 0.15.3 lifecycle image (from Jan 2023). + + + +# Alternatives +[alternatives]: #alternatives + +- What other designs have been considered? + - Only Patch the Most Recent Release: This is a fine idea but it only works in a world where all users are willing to migrate to the latest release. This RFC addresses the wants and needs of users who are not comfortable performing minor version upgrades, even with backwards compatibility guarantees, in order to address CVEs. + - Only Patch development trunk, and wait for the next release: This also only works in a world where minor version upgrades are seen as cheap or low-risk to perform. + - Don’t Patch CVEs: we probably wouldn’t ever do this option. + + +- Why is this proposal the best? +This is the only proposal palatable to orgs that view minor releases as high-risk and/or expensive. +- What is the impact of not doing this? 
+We risk alienating some enterprise users of CNB. + +# Prior Art +[prior-art]: #prior-art + +N/A + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +N/A beyond general feedback and consensus. + +# Spec. Changes (OPTIONAL) +[spec-changes]: #spec-changes + +N/A + +# History +[history]: #history + + diff --git a/text/0121-kpack-donation-to-cnb.md b/text/0121-kpack-donation-to-cnb.md new file mode 100644 index 000000000..4316a97e6 --- /dev/null +++ b/text/0121-kpack-donation-to-cnb.md @@ -0,0 +1,409 @@ +# Meta + +- Name: kpack donation to CNB +- Start Date: 2022-06-21 +- Author(s): [Juan Bustamante](https://github.com/jjbustamante/) +- Status: Approved +- RFC Pull Request: [rfcs#235](https://github.com/buildpacks/rfcs/pull/235) +- CNB Pull Request: (leave blank) +- CNB Issue: N/A +- Supersedes: (put "N/A" unless this replaces an existing RFC, then link to that RFC) + +# Summary + +This RFC proposes the donation of the open-source project [kpack](https://github.com/pivotal/kpack/) into the [Cloud Native Buildpacks Community Organization](https://github.com/buildpacks-community) as a vendor neutral staging ground under the CNB governance umbrella. Once the project is deemed sufficiently mature, the project will be moved under the [Cloud Native Buildpacks Organization](https://github.com/buildpacks). + +Following the process defined in the [Buildpacks Community RFC](https://github.com/buildpacks/rfcs/blob/main/text/0117-buildpacks-community.md), the following table presents the criteria used to evaluate the project.
+ +| Criteria | Evidence | +|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| The project must be a tooling, platform or integration that is related to Cloud Native Buildpacks. | See [Motivation](#motivation) section | +| The project must be open source and licensed under Apache 2.0. | See [License](https://github.com/pivotal/kpack/blob/main/LICENSE) | +| List of all external dependencies with licensing info and they’re permissively licensed with a Apache 2.0 compatible license | See [report](https://drive.google.com/file/d/1SvPdl69Lhn0TTx_TaxcesfajuYY9uiNd/view?usp=share_link) generated using [go-licenses](https://github.com/google/go-licenses) | +| It must follow the Cloud Native Computing Foundation Code of Conduct. | See [Code of conduct](https://github.com/pivotal/kpack/blob/main/CODE_OF_CONDUCT.md) | +| The project must enable DCO signoff for all commits. | See [Sign-off process](https://github.com/pivotal/kpack/blob/main/CONTRIBUTING.md#sign-off-process) | +| The project must be open to contributions and have a public issue tracker. | See public [issue tracker](https://github.com/pivotal/kpack/issues) | +| The project must have a governance document that clearly defines the project maintainers and how they are elected. Each project may choose to define their own governance model as long as it is clearly documented and allows for project maintainers to be elected from the community. | See [Governance](https://github.com/pivotal/kpack/blob/main/GOVERNANCE.md) | +| The list of project maintainers must be publicly available and controlled through a Github team. 
| See [Maintainers](https://github.com/pivotal/kpack/blob/main/MAINTAINERS.md) | +| The project must use a CODEOWNERS file to define the maintainers for each repository. The CODEOWNERS file should reference the Github team that controls the list of maintainers. | See [CODEOWNERS](https://github.com/pivotal/kpack/blob/main/CODEOWNERS) file | +| All project contributors must be members of the Buildpacks community organization. | See [Team Roles](#team-roles) section and [People](https://github.com/orgs/buildpacks-community/people) in CNB community organization | +| The project must be actively maintained (i.e. issues and pull requests must be addressed regularly, approved pull requests must be merged or updated in a timely manner, etc.). | See [issues](https://github.com/pivotal/kpack/issues) and [pull requests](https://github.com/pivotal/kpack/pulls) | +| There should have visible automated testing for all repositories that are part of the project. | See [codecov](https://app.codecov.io/gh/pivotal/kpack) | +| The project maintainers must conform to a set of best effort SLOs around patching critical CVEs when applicable to the project. | | +| The has a file - CONTRIBUTING.md: A guide to how contributors should submit patches and the expectations around code review. | See [Contributing](https://github.com/pivotal/kpack/blob/main/CONTRIBUTING.md) | +| The has a file - DEVELOPMENT.md: A guide to how contributors should develop the project. | See [Development](https://github.com/pivotal/kpack/blob/main/DEVELOPMENT.md) | +| The has a file - ADOPTERS.md: A list of adopters of the project. | See [Adopters](https://github.com/pivotal/kpack/blob/main/ADOPTERS.md) | +| The has a file - VERSIONING.md: A guide to how versioning is done for the project. | See [Versioning](https://github.com/pivotal/kpack/blob/main/VERSIONING.md) | +| The has a file - RELEASE.md: A guide to how releases are done for the project. 
| See [Release](https://github.com/pivotal/kpack/blob/main/RELEASE.md) | +| The has a file - SECURITY.md: A guide to how security vulnerabilities should be reported. | See Security [Pull Request](https://github.com/pivotal/kpack/pull/1149) | + +# Definitions + +- [Kubernetes](https://kubernetes.io/) is an open-source system for automating deployment, scaling, and management of containerized applications. +- [Kpack](https://github.com/pivotal/kpack/) is a VMware-led open-source project that utilizes [Kubernetes](https://kubernetes.io/) primitives to build OCI images as a [platform](https://buildpacks.io/docs/concepts/components/platform/) implementation of [Cloud Native Buildpacks](https://buildpacks.io/). +- A Kubernetes native application is an application designed to run on Kubernetes platforms, managed by Kubernetes APIs and `kubectl` tooling and cohesively deployed on Kubernetes as a single object. + +# Motivation + +### Why should we do this? + +It will benefit the [CNB](https://buildpacks.io/) project by adding a tool to support an out-of-the box [Kubernetes](https://kubernetes.io/) integration, which is part of the [CNB](https://buildpacks.io/) [roadmap](https://github.com/buildpacks/community/blob/main/ROADMAP.md#integration-with-the-cloud-native-ecosystem) goals. + +It will show evidence to the community that the project supports multiple [platform interface specification](https://github.com/buildpacks/spec/blob/main/platform.md) implementers increasing community's confidence on the flexibility of specification maintained by the [CNB](https://buildpacks.io/) project. + +It will help the [CNB](https://buildpacks.io/) community (+550 members on slack channel) to grow by adding all the [kpack](https://github.com/pivotal/kpack/) community into [CNB](https://buildpacks.io/) space. 
+ +[CNB](https://buildpacks.io/) is part of the [Cloud Native Computing Foundation](https://www.cncf.io), an open source, vendor neutral hub of cloud native computing projects, the inclusion of [kpack](https://github.com/pivotal/kpack/) under this umbrella will provide more opportunity to the community: + +- Increase in adopters, users looking to use buildpacks in [Kubernetes](https://kubernetes.io/) will find a tool supported and maintained by the [CNB team](https://github.com/buildpacks/community/blob/main/TEAMS.md). +- Improve efficiency, ensuring that the roadmaps of the two projects are closer aligned will make it easier to coordinate efforts between both communities. + +### What use cases does it support? + +[kpack](https://github.com/pivotal/kpack/) will add support to operators by providing declarative [Kubernetes](https://kubernetes.io/) resources (images, builders, or stacks for example) to monitor for security patches on the underlying builder's buildpacks or stacks and rebuild the OCI image when changes are detected, allowing platforms to roll out new versions of the applications when vulnerabilities are fixed. + +### How does kpack support the goals and use cases of the project? + +The [CNB](https://buildpacks.io/) project turns application source code into OCI-compliant container images; in order to do that, it defines a platform-to-buildpack contract that guarantees interoperability between different implementers. + +The [CNB](https://buildpacks.io/) project embraces modern container standards, and [Kubernetes](https://kubernetes.io/) has become the industry standard for automating deployment, scaling, and management of containerized applications. 
+ +[kpack](https://github.com/pivotal/kpack/) fits perfectly in that direction because it implements the [platform interface specification](https://github.com/buildpacks/spec/blob/main/platform.md) and because is a [Kubernetes](https://kubernetes.io/) native application its community possesses a vast knowledge that can provide valuable feedback to the CNB project. + +### Is there functionality in kpack that is already provided by the project? + +[pack](https://github.com/buildpacks/pack) and [kpack](https://github.com/pivotal/kpack/) offer similar functionality (both tools implement the [platform interface](https://github.com/buildpacks/spec/blob/main/platform.md)[specification](https://github.com/buildpacks/spec/blob/main/platform.md)) but they do it for two non-overlapping contexts: while the first one targets developers and local builds, [kpack](https://github.com/pivotal/kpack/) manages containerization on day-2 and at scale and is a [Kubernetes](https://kubernetes.io/) native implementation. + +### Is kpack integrated with another service or technology that is widely used? + +As mentioned earlier, [kpack](https://github.com/pivotal/kpack/) implements the [platform interface specification](https://github.com/buildpacks/spec/blob/main/platform.md) on [Kubernetes](https://kubernetes.io/), a standard nowadays for automating deployment, scaling, and management of containerized applications. + +# What it is + +[Kubernetes](https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/) is a portable, extensible, open-source platform for managing containerized workloads and services. 
The [Kubernetes](https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/) API can be extended in different ways; one of them is using [custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/), a custom resource represents a customization of a particular [Kubernetes](https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/) installation. + +[kpack](https://github.com/pivotal/kpack/) extends [Kubernetes](https://kubernetes.io/) using [custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) and utilizes unprivileged [Kubernetes](https://kubernetes.io/) primitives to provide builds of OCI images as a platform implementation of [Cloud Native Buildpacks](https://buildpacks.io/). This means that [kpack](https://github.com/pivotal/kpack/) takes the CNB-defined concepts (image, builder, stacks, etc) and bakes them into the Kubernetes extension model using custom resources and exposing a declarative API for interacting with it. + +The declarative API enforces a separation of responsibilities. Operators declare the configuration for a CNB image or define which buildpacks or stacks must be used, and [kpack](https://github.com/pivotal/kpack/) - using its custom controller - will take care of the heavy lifting, keeping the state of the custom objects in sync with the declared desired state. + +# How it Works + +As mentioned before, [kpack](https://github.com/pivotal/kpack/) uses the [custom resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) extension point to provide the capabilities of building OCI images as a platform implementation of [Cloud Native Buildpacks](https://buildpacks.io/). 
+ +These custom resources have a common definition similar to this: + +```yaml +apiVersion: kpack.io/v1alpha2 +kind: [ClusterStack|ClusterStore|Image|Builder|Build] +metadata: + name: [unique name] +``` + +The _apiVersion_ key specifies which version of the Kubernetes API is used to create the object, in this case **kpack.io/v1alpha2** + +The _kind_ key specifies what kind of objects we want to create for example: **ClusterStack, ClusterStore, Image, Builder or Build** + +The _metadata_ key is used to define the data that can uniquely identify the object. One common key used around all the custom resources is to provide a _name_ to identify the object. + +Some of the [custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) implemented by [kpack](https://github.com/pivotal/kpack/) are describe in the next section, if you want to check the complete reference go to [kpack](https://github.com/pivotal/kpack/) documentation [site](https://github.com/pivotal/kpack/tree/main/docs) + +## ClusterStack + +This resource is an abstraction to group a `build image` and a `run image` required to build the application source code. + +Let's see an example of the [ClusterStack](https://github.com/pivotal/kpack/blob/main/docs/stack.md) definition + +```yaml +apiVersion: kpack.io/v1alpha2 +kind: ClusterStack +metadata: + name: base +spec: + id: "io.buildpacks.stacks.bionic" + buildImage: + image: "my-buildpack-repo/build:cnb" + runImage: + image: "my-buildpack-repo/run:cnb" +``` + +The _spec_ key is used to define the desired state of the ClusterStack and the keys availables under _spec_ match the values expected in a CNB [stack](https://buildpacks.io/docs/concepts/components/stack/) definition: + +- _id_: The 'id' of the stack +- _buildImage.image_: The `build-image` of the [stack](https://buildpacks.io/docs/concepts/components/stack/). 
+- _runImage.image_: The `run-image` of the [stack](https://buildpacks.io/docs/concepts/components/stack/). + +## Cluster Store + +Creates a repository of buildpacks packaged as OCI artifacts to be used during a build. + +Let's see an example of the [ClusterStore](https://github.com/pivotal/kpack/blob/main/docs/store.md) definition + +``` yaml +apiVersion: kpack.io/v1alpha2 +kind: ClusterStore +metadata: + name: my-cluster-store +spec: + sources: + - image: foo.com/my-buildpack-repo/buildpack-1@sha256:sha123 + - image: foo.com/my-buildpack-repo/buildpack-2@sha256:sha345 + - image: foo.com/my-buildpack-repo/builder:base + ``` + +The _spec_ key is used to define the desired state of the [ClusterStore](https://github.com/pivotal/kpack/blob/main/docs/store.md) + +- _sources_: List of buildpackage images to make available in the ClusterStore. Each image is an object with the key _image_. + +As a side note the [ClusterStore](https://github.com/pivotal/kpack/blob/main/docs/store.md) resource will be deprecated in favor of a new Buildpack resource in the near future according to the following [RFC](https://www.google.com/url?q=https://github.com/pivotal/kpack/pull/931&sa=D&source=docs&ust=1665521917723122&usg=AOvVaw1eNN-XzLf5xiX1nvrHKMRE) + +## Builder or ClusterBuilder + +Creates a [CNB builder](https://buildpacks.io/docs/concepts/components/builder/) image that contains all the components necessary to execute a build. 
+ +An example of the [Builder](https://github.com/pivotal/kpack/blob/main/docs/builders.md) definition is as follows: + +```yaml +apiVersion: kpack.io/v1alpha2 +kind: Builder +metadata: + name: my-builder +spec: + tag: foo.com/sample/builder + stack: + name: base + kind: ClusterStack + store: + name: my-cluster-store + kind: ClusterStore + order: + - group: + - id: my-buildpack-repo/buildpack-1 + - group: + - id: my-buildpack-repo/buildpack-2 + ``` + +It's important to notice that a [ClusterStack](https://github.com/pivotal/kpack/blob/main/docs/stack.md) and [ClusterStore](https://github.com/pivotal/kpack/blob/main/docs/store.md) is required for creating a [Builder](https://github.com/pivotal/kpack/blob/main/docs/builders.md). + +The _spec_ key is used to define the desired state of the [Builder](https://github.com/pivotal/kpack/blob/main/docs/builders.md) + +- _tag_: The tag to save the builder image. +- _stack.name_: The name of the stack resource to use as the builder stack. All buildpacks in the order must be compatible with the [ClusterStack](https://github.com/pivotal/kpack/blob/main/docs/stack.md). +- _stack.kind_: The type as defined in [Kubernetes](https://kubernetes.io/). This will always be [ClusterStack](https://github.com/pivotal/kpack/blob/main/docs/stack.md). +- _store.name_: The name of the [ClusterStore](https://github.com/pivotal/kpack/blob/main/docs/store.md) resource in [Kubernetes](https://kubernetes.io/). +- _store.kind_: The type as defined in [Kubernetes](https://kubernetes.io/). This will always be [ClusterStore](https://github.com/pivotal/kpack/blob/main/docs/store.md). +- _order_: The [builder order](https://buildpacks.io/docs/reference/builder-config/). 
+
+The [ClusterBuilder](https://github.com/pivotal/kpack/blob/main/docs/builders.md#cluster-builders) resource is almost identical to a [Builder](https://github.com/pivotal/kpack/blob/main/docs/builders.md) but it is a cluster scoped resource that can be referenced by an [Image](https://github.com/pivotal/kpack/blob/main/docs/image.md) in any namespace.
+
+## Build
+
+Custom resource responsible for scheduling and running a single build.
+
+An example of a [Build](https://github.com/pivotal/kpack/blob/main/docs/build.md) definition is
+
+```yaml
+apiVersion: kpack.io/v1alpha2
+kind: Build
+metadata:
+  name: sample-build
+spec:
+  tags:
+    - sample/image
+  builder:
+    image: foo.com/sample/builder
+  projectDescriptorPath: path/to/project.toml
+  source:
+    git:
+      url: https://github.com/my-account/sample-app.git
+      revision: main
+```
+
+The _spec_ key is used to define the desired state of the [Build](https://github.com/pivotal/kpack/blob/main/docs/build.md)
+
+- _tags_: A list of tags to build. At least one tag is required.
+- _builder.image_: This is the tag to the [Cloud Native Buildpacks builder image](https://buildpacks.io/docs/concepts/components/builder/) to use in the build.
+- _source_: The source location that will be the input to the build.
+- _projectDescriptorPath_: Path to the [project descriptor file](https://buildpacks.io/docs/reference/config/project-descriptor/) relative to source root dir or subPath if set.
+
+## Image
+
+Provides a configuration to build and maintain an OCI image utilizing [CNB](https://buildpacks.io/).
+
+An example of an [Image](https://github.com/pivotal/kpack/blob/main/docs/image.md) definition is as follows
+
+```yaml
+apiVersion: kpack.io/v1alpha2
+kind: Image
+metadata:
+  name: my-app-image
+  namespace: default
+spec:
+  tag: foo.com/my-app-repo/my-app-image
+  builder:
+    name: my-builder
+    kind: Builder
+  source:
+    git:
+      url: https://github.com/my-account/sample-app.git
+      revision: 82cb521d636b282340378d80a6307a08e3d4a4c4
+```
+
+The _spec_ key is used to define the desired state of the [Image](https://github.com/pivotal/kpack/blob/main/docs/image.md)
+
+- _tag_: The image tag.
+- _builder_: Configuration of the [builder](https://github.com/pivotal/kpack/blob/main/docs/builders.md) resource the image builds will use.
+- _source_: The source code that will be monitored/built into images.
+
+# Contributors
+
+Contributions to [kpack](https://github.com/pivotal/kpack/) during the period 2019-2022 can be summarized as follows
+
+```mermaid
+pie showData
+    title Pull Requests Open or Closed
+    "VMWare or Pivotal" : 438
+    "Others" : 37
+```
+
+# Migration
+
+### Repositories
+
+The suggested strategy for migrating [kpack's](https://github.com/pivotal/kpack/) git repositories to the [CNB](https://buildpacks.io/) is to use the [transfer repository](https://docs.github.com/en/repositories/creating-and-managing-repositories/transferring-a-repository#transferring-a-repository-owned-by-your-organization) git feature.
+
+The following table shows the candidate repositories to be transferred
+
+| Origin Repo | Description | Owner | Destination Repo | Owner |
+| --- | --- | --- | --- | --- |
+| [https://github.com/pivotal/kpack](https://github.com/pivotal/kpack) | kpack source code | Pivotal | [https://github.com/buildpacks-community/kpack](https://github.com/buildpacks-community/kpack) | [CNB Technical Oversight Committee](https://github.com/buildpacks/community/blob/main/GOVERNANCE.md#technical-oversight-committee) |
+| [https://github.com/vmware-tanzu/kpack-cli](https://github.com/vmware-tanzu/kpack-cli) | kpack CLI | VMware | [https://github.com/buildpacks-community/kpack-cli](https://github.com/buildpacks-community/kpack-cli) | [CNB Technical Oversight Committee](https://github.com/buildpacks/community/blob/main/GOVERNANCE.md#technical-oversight-committee) |
+| [https://github.com/vmware-tanzu/homebrew-kpack-cli](https://github.com/vmware-tanzu/homebrew-kpack-cli) | Homebrew tap for the kpack CLI | VMware | [https://github.com/buildpacks-community/homebrew-kpack-cli](https://github.com/buildpacks-community/homebrew-kpack-cli) | [CNB Technical Oversight Committee](https://github.com/buildpacks/community/blob/main/GOVERNANCE.md#technical-oversight-committee) |
+
+For each repository
+
+- The owner or admin user must follow the steps described in the GitHub [documentation](https://docs.github.com/en/repositories/creating-and-managing-repositories/transferring-a-repository#transferring-a-repository-owned-by-your-organization) and transfer the repository to the organization [Cloud Native Buildpacks](https://github.com/buildpacks)
+- A member of the [TOC team](https://github.com/orgs/buildpacks/teams/toc/members) in [CNB](https://buildpacks.io/) must accept the donation of the repository. The name of the destination repository will be the one described in the table above.
+
+### CI / CD Pipelines
+
+[kpack's](https://github.com/pivotal/kpack/) CI/CD pipelines were rebuilt to use [GitHub Actions](https://github.com/pivotal/kpack/tree/main/.github).
+In order for [kpack](https://github.com/pivotal/kpack/) to run windows acceptance tests it requires a kubernetes cluster with windows nodes. The hardware requirements are specified in the following section.
+
+##### Hardware requirements
+
+The minimal hardware requirements to request to CNCF to recreate the CI/CD pipelines are:
+
+###### Kubernetes clusters
+
+**Build cluster**
+
+- Linux nodes
+  - 1 amd64 node / 2 CPU / 8GB memory / 50GB ephemeral disk storage
+- Windows nodes
+  - 1 amd64 node / 4 CPU / 16GB memory / 100GB ephemeral disk storage
+- At least 100 GB of storage in a public OCI registry
+
+### Documentation
+
+[Kpack](https://github.com/pivotal/kpack/) documentation is currently hosted in the base code [repository](https://github.com/pivotal/kpack/tree/main/docs); after migrating to [CNB](https://buildpacks.io/) the documentation will be published into the Cloud Native Buildpack [site](https://buildpacks.io/).
+
+[CNB](https://buildpacks.io/) already mentions [kpack](https://github.com/pivotal/kpack/) in their documentation, specifically in the tools section.
The proposal is: + +- Create a new folder name **kpack** inside the [tool](https://github.com/buildpacks/docs/tree/main/content/docs/tools) section in the docs repository +- Copy kpack's [documentation](https://github.com/pivotal/kpack/tree/main/docs) into this new created folder +- Update the references and all the required elements to format the documentation according to [CNB](https://buildpacks.io/) site + +### Governance + +#### Team roles + +Based on the [CNB governance policy](https://github.com/buildpacks/community/blob/main/GOVERNANCE.md) and the fact that [kpack](https://github.com/pivotal/kpack/) is a [platform](https://buildpacks.io/docs/concepts/components/platform/) implementation of [Cloud Native Buildpacks](https://buildpacks.io/), it will be added under the responsibility of the [CNB Platform Team](https://github.com/buildpacks/community/blob/main/TEAMS.md#Platform-Team). + +How do migrate roles and responsibilities into the CNB governance process? + +Currently, the [CNB Platform Team](https://github.com/buildpacks/community/blob/main/TEAMS.md#Platform-Team) already has a **team lead** assigned and, by definition, each team can have only one **team lead**. 
In order to provide the current [kpack](https://github.com/pivotal/kpack/) team with the same accountability for the migrated repositories the proposal is to follow the guidelines described in the [Component Maintainer Role RFC](https://github.com/buildpacks/rfcs/pull/234)
+
+The [kpack](https://github.com/pivotal/kpack/) maintainers that will be nominated as **component maintainer** in CNB are:
+
+| Name | Github account | Organization |
+|------------------|----------------------------------------------------|--------------|
+| Matthew McNew | [@matthewmcnew](https://github.com/matthewmcnew) | VMware |
+| Tom Kennedy | [@tomkennedy513](https://github.com/tomkennedy513) | VMware |
+| Daniel Chen | [@chenbh](https://github.com/chenbh) | VMware |
+| Juan Bustamante | [@jjbustamante](https://github.com/jjbustamante) | VMware |
+
+Also, those members are willing to become more involved with CNB projects and become **Platform maintainers** in the near future.
+
+Outside VMware, the following contributors manifested their desire to become [kpack's](https://github.com/pivotal/kpack/) **component maintainer**.
+
+| Name | Github account | Organization |
+|-----------------|--------------------------------------------------|--------------|
+| Sambhav Kothari | [@samj1912](https://github.com/samj1912) | Bloomberg |
+| Aidan Delaney | [@AidanDelaney](https://github.com/AidanDelaney) | Bloomberg |
+
+#### RFC process
+
+Once the migration is completed, [kpack](https://github.com/pivotal/kpack/) will follow the [RFC process](https://github.com/buildpacks/rfcs) and [RFC template](https://github.com/buildpacks/rfcs/blob/main/0000-template.md) established in the CNB project for any new RFC created in the project.
+
+##### Existing RFC
+
+- **Open**: Currently there are fewer than 10 [open RFCs](https://github.com/pivotal/kpack/pulls?q=is%3Apr+label%3ARFC+is%3Aopen) (some of them opened 2 years ago) in the [kpack](https://github.com/pivotal/kpack/) repository.
+  - The proposal is to suggest the [kpack](https://github.com/pivotal/kpack/) maintainers to:
+    - Triage those RFCs and update their status before the donation.
+    - Coordinate the announcement of the donation to the RFC authors and explain the post-migration strategy (next section) to them
+    - After the donation, any open RFCs in the [kpack](https://github.com/pivotal/kpack/) repository should be closed
+      - The RFC author should create a new RFC in the CNB RFC [repository](https://github.com/buildpacks/rfcs) and follow the CNB [RFC process](https://github.com/buildpacks/rfcs)
+
+- **Closed**: For historical purposes, we will keep those RFCs in the repository.
+
+#### Slack channel
+
+The proposals are:
+  - `keep` the [kpack](https://github.com/pivotal/kpack/) channel in the [Kubernetes slack instance](https://kubernetes.slack.com/channels/kpack); as [kpack](https://github.com/pivotal/kpack/) is a Kubernetes native application most of their users already use the [Kubernetes slack instance](https://kubernetes.slack.com/channels/kpack) for communication.
+  - `create` a new channel in the [CNCF slack instance](https://slack.cncf.io/); this will bring the two communities (kpack and CNB) together
+
+[kpack](https://github.com/pivotal/kpack/) maintainers should include the notification of the new channel in the announcement of the donation.
+
+[Platform maintainers](https://github.com/buildpacks/community/blob/main/TEAMS.md#maintainers-1) will have to request or create the new slack channel with the following name: **buildpacks-kpack** (which will be defined as the preferred channel to be used).
+
+# Risks
+
+- So far the main company behind [kpack](https://github.com/pivotal/kpack/) is [VMware](https://www.vmware.com/); a reduction in the investment from [VMware](https://www.vmware.com/) would create a problem and the CNB project would have to either sunset [kpack](https://github.com/pivotal/kpack/) or find investment from the community.
+- It's not clear how to handle the budget required to finance the infrastructure to rebuild the CI/CD pipelines on CNCF CNB infrastructure. +- Evaluate any legal requirement from [CNCF](https://www.cncf.io) that must be fulfilled before accepting the project into the [CNB](https://buildpacks.io/) ecosystem. + +# Drawbacks + +Why should we _not_ do this? + +- If the [CNB](https://buildpacks.io/) team expects to implement a different kind of integration with [Kubernetes](https://kubernetes.io/), then accepting the donation of [kpack](https://github.com/pivotal/kpack/) could conflict with that strategy. +- Another component to maintain which requires additional context and expertise in [Kubernetes](https://kubernetes.io/). + +# Alternatives + +- What other designs have been considered? + - [VMware](https://www.vmware.com/) could continue to control the project, but it doesn't help on increase the adoption because it remains as a single-vendor driven project + - [VMware](https://www.vmware.com/) could donate [kpack](https://github.com/pivotal/kpack/) to the [Continuous Delivery Foundation](https://cd.foundation/), but [CNB](https://buildpacks.io/) presents a natural home for [kpack](https://github.com/pivotal/kpack/) (it is an implementation of the platform specification) + - [VMware](https://www.vmware.com/) could create a new [CNCF](https://www.cncf.io/) project and move all [kpack](https://github.com/pivotal/kpack/) resources to it, but in this case it would need to undergo as a sandbox project for example. + +- Why is this proposal the best? + +[kpack](https://github.com/pivotal/kpack/) is a mature Kubernetes-native tool that leverages buildpacks and is used in production environments. 
The project's maintainers and contributors possess valuable technical and user context, derived from developing [kpack](https://github.com/pivotal/kpack/) and integrating feedback from users utilizing [CNB](https://buildpacks.io/) concepts when presented as part of Kubernetes resources.
+
+- What is the impact of not doing this?
+
+The [CNB](https://buildpacks.io/) community would have to develop from scratch any kind of integration with the Cloud Native Ecosystem to satisfy the project goals.
+
+**Prior Art**
+
+- Guidelines for accepting component-level contributions [RFC #143](https://github.com/buildpacks/rfcs/pull/143)
+- Component Maintainer Role [RFC #234](https://github.com/buildpacks/rfcs/pull/234)
+- Proposal to move CNCF slack [RFC #198](https://github.com/buildpacks/rfcs/pull/198)
+
+# Unresolved Questions
+
+See the risks section
+
+# Spec. Changes (OPTIONAL)
+
+None
diff --git a/text/0122-2023H2-roadmap.md b/text/0122-2023H2-roadmap.md
new file mode 100644
index 000000000..972ff9eef
--- /dev/null
+++ b/text/0122-2023H2-roadmap.md
@@ -0,0 +1,177 @@
+# Meta
+[meta]: #meta
+- Name: 2023H2 Roadmap
+- Start Date: 2023-06-07
+- Author(s): hone
+- Status: Approved
+- RFC Pull Request: [rfcs#286](https://github.com/buildpacks/rfcs/pull/286)
+- CNB Pull Request:
+- CNB Issue: N/A
+- Supersedes: [RFC 0118](https://github.com/buildpacks/rfcs/blob/main/text/0118-2023H1-roadmap.md)
+
+# Summary
+[summary]: #summary
+
+This RFC details the second half of the 2023 Roadmap leading up to KubeCon NA.
+
+# Definitions
+[definitions]: #definitions
+
+## Roadmap Status Items
+* Finished - The item has already been completed and there is no need to put it on the current roadmap.
+* Continue - More work is needed and it will be continued as part of the current roadmap.
+* Defer - This item will be parked for now and doesn't make the cut for the current roadmap.
+
+# Motivation
+[motivation]: #motivation
+
+With KubeCon NA coming up around the corner, it's time to plan out what we want to achieve as a project. It's also an opportunity to review items from the H1 roadmap.
+
+# What it is
+[what-it-is]: #what-it-is
+
+## 2023H2 Roadmap
+
+This roadmap is going to be split into two sections covering first the H1 Roadmap and status, as well as new items we want to tackle leading up to KubeCon NA.
+
+### Items from [2023H1]
+[items-from-2023h1]: #items-from-2023h1
+
+As a project, we've made good progress on our H1 roadmap, but still need some more time to get some of them across the finish line.
+
+#### Release Base Image Extension
+* Owner: @natalieparellano
+* Status: Continue
+* Links: [RFC](https://github.com/buildpacks/rfcs/blob/main/text/0105-dockerfiles.md)
+
+This is close and will be released as part of Platform `0.12`, Buildpack `0.10`, lifecycle `0.17.0`, and pack `0.30.0`.
+
+#### Remove Stacks & Mixins
+* Owner: @jkutner
+* Status: Continue
+* Links: [RFC](https://github.com/buildpacks/rfcs/blob/main/text/0096-remove-stacks-mixins.md)
+
+This is close and will be released as part of Platform `0.12`, Buildpack `0.10`, lifecycle `0.17.0`, and pack `0.30.0`.
+
+#### Execution Environments RFC
+* Owner: @hone
+* Status: Continue
+* Links: [RFC](https://github.com/buildpacks/rfcs/pull/274)
+
+The RFC is written, but feedback needs to be incorporated before re-opening for review.
+
+#### Project Health
+* Owner: @samj1912
+* Status: Finished
+* Links: [Buildpacks Community Organization RFC](https://github.com/buildpacks/rfcs/pull/273)
+
+The RFC has been merged and the buildpacks community GitHub org has been created.
+
+#### Pack Test Cleaning/Optimizations
+* Owner: @dfreilich
+* Status: Defer
+* Links: [Pack Pull Request](https://github.com/buildpacks/pack/pull/1498)
+
+This item has been deferred for now, but we will work with anyone who wants to push this forward.
+ +### New Items + +#### Community Engagement Health Checks +* Owner: @microwavables (Team Lead sponsor @jkutner) +* Links: [VMware Tanzu Community Engagement Health Checks](https://github.com/vmware-tanzu/community-engagement/blob/main/HEALTHCHECKS.md) + +The project is lucky to have a Community Manager! This is one of the projects proposed by @microwavables to set a base line to measure how we're doing as a community. + +#### RFC for Buildpack Author Observability +* Owner: @joshwlewis (Team Lead sponsor @hone) +* Links: TBD + +Currently, Buildpack Authors have little to no tools around visibility with their buildpacks as they run on a platform, including `pack`. In some of the Heroku v2 buildpacks, they implemented logging that could be handed off to the platform by running `bin/report`. This work stream is about standardizing output for both successful AND failed builds that Buildpack Authors can use to instrument their buildpack. + +#### Private Registry Mirrors +* Owner: @jabrown85 +* Links: [RFC](https://github.com/buildpacks/rfcs/pull/285) + +A platform operator can configure registry mirrors that lifecycle could use without needing for the manifest to have to point to it. This will allow a platform to reduce the risk of service operations from external registry sources and reduce public network bandwidth usage. The resulting image when taken off platform will also function without needing access to the registry mirror. + +#### kpack Donation +* Owner: @jjbustamante (Team Lead sponsor @samj1912) +* Links: [RFC](https://github.com/buildpacks/rfcs/pull/235) + +`kpack` is being proposed to be donated as an open source project in the Cloud Native Buildpacks' new Community Organization as a vendor neutral staging ground. This will give the project space to grow the project contributor base from multiple vendors. 
While work has started on this in H1, this item represents our commitment as a project to see this through and set this project up for success under the CNB governance umbrella.
+
+#### Cosign Integration / OCI Image Manifest v1.1
+* Owner: @natalieparellano
+* Links: [Cosign Integration RFC](https://github.com/buildpacks/rfcs/pull/195), [SBOM layer RFC](https://github.com/buildpacks/rfcs/pull/278)
+
+While CNBs support SBOMs today, they were designed a few years ago and the tooling around them has been evolving. This work stream is about making CNBs integrate better with tools like [cosign's SBOM spec](https://github.com/sigstore/cosign/blob/main/specs/SBOM_SPEC.md) and the upcoming [OCI References](https://github.com/opencontainers/image-spec/issues/827) feature in OCI Image Manifest 1.1.
+
+#### Pack OCI Manifest Support
+* Owner: @jjbustamante (Team Lead sponsor @hone)
+* Links: [RFC](https://github.com/buildpacks/rfcs/pull/283)
+
+Multi-arch support has been a highly requested feature with the growing popularity of the ARM architecture. In order to better support this with Buildpacks, the first step will be to be able to use manifest lists to provide a single URI for Buildpacks that support multiple architectures.
+
+#### Export to OCI Layout
+* Owner: @jjbustamante (Team Lead sponsor @natalieparellano)
+* Links: [RFC](https://github.com/buildpacks/rfcs/blob/main/text/0119-export-to-oci.md)
+
+The RFC has been merged and the implementation is expected to be released in experimental mode on pack *v0.30.0*
+
+# How it Works
+[how-it-works]: #how-it-works
+
+See [What it is](#what-it-is) for the details. We'll be following the same process from the [2023H1 Roadmap RFC](https://github.com/buildpacks/rfcs/blob/main/text/0118-2023H1-roadmap.md) if approved.
+
+# Migration
+[migration]: #migration
+
+N/A
+
+# Drawbacks
+[drawbacks]: #drawbacks
+
+- Agreeing to more work, while we still need to finish 2023H1 items.
+ +# Alternatives +[alternatives]: #alternatives + +- Do Nothing and just hunker down on our existing items. + +# Prior Art +[prior-art]: #prior-art + +See [Prior Art from 2023H1 Roadmap RFC](https://github.com/buildpacks/rfcs/blob/main/text/0118-2023H1-roadmap.md#prior-art). + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +- What parts of the design do you expect to be resolved before this gets merged? +- What parts of the design do you expect to be resolved through implementation of the feature? +- What related issues do you consider out of scope for this RFC that could be addressed in the future independently of the solution that comes out of this RFC? + +# Spec. Changes (OPTIONAL) +[spec-changes]: #spec-changes + +N/A + +# History +[history]: #history + + diff --git a/text/0123-flatten-feature.md b/text/0123-flatten-feature.md new file mode 100644 index 000000000..4f0f31013 --- /dev/null +++ b/text/0123-flatten-feature.md @@ -0,0 +1,222 @@ +# Meta +[meta]: #meta +- Name: Flatten builders +- Start Date: 2023-07-13 +- Author(s): @jjbustamante, @dlion +- Status: Approved +- RFC Pull Request: (leave blank) +- CNB Pull Request: (leave blank) +- CNB Issue: (leave blank) +- Supersedes: (put "N/A" unless this replaces an existing RFC, then link to that RFC) + +# Summary +[summary]: #summary + +We propose to add new capabilities to the Pack tool that allow end-users to reduce the number of Buildpack's layers in a Builder by flattening some Buildpacks according to their requirements. + +This RFC mainly focus on applying this strategy to Builders only. + +# Definitions +[definitions]: #definitions + +- Buildpack: A buildpack is a set of executables that inspects your app source code and creates a plan to build and run your application. +- Builder: A builder is an image that contains all the components necessary to execute a build. 
A builder image is created by taking a build image and adding a lifecycle, buildpacks, and files that configure aspects of the build including the buildpack detection order and the location(s) of the run image
+- Component Buildpack: A component buildpack is a buildpack containing `/bin/detect` and `/bin/build` executables. Component buildpacks implement the [Buildpack Interface](https://github.com/buildpacks/spec/blob/main/buildpack.md).
+- Composite Buildpack: A composite buildpack is a buildpack containing an order definition in `buildpack.toml`. Composite buildpacks do not contain `/bin/detect` or `/bin/build` executables. They MUST be [resolvable](https://github.com/buildpacks/spec/blob/main/buildpack.md#order-resolution) into a collection of component buildpacks.
+- Buildpackage: A buildpackage is a [distributable](https://github.com/buildpacks/spec/blob/main/distribution.md) artifact that contains a buildpack.
+
+# Motivation
+[motivation]: #motivation
+
+- Why should we do this?
+
+There is a limit on the number of layers an image can have, at least on Docker, which is *127*. This feature has been requested by the community, issue [#1595](https://github.com/buildpacks/pack/issues/1595), as a workaround to solve the error thrown by Docker when the limit is reached
+
+- What use cases does it support?
+
+Buildpack providers like Paketo have Composite Buildpacks with several layers; when they pull many of those together into a Builder, hitting the layer limit for a container image happens very often. A feature for the Builder author to group the Buildpacks by any attribute will allow them to squash those groups into one layer and reduce their total number of layers, avoiding the layer limit.
+
+- What is the expected outcome?
+
+When Builder Authors execute the command:
+
+`pack builder create ... --flatten=<buildpacks>`
+
+The final Builder (A) SHOULD contain layer blobs with more than *one* buildpack according to the configuration provided by the user.
If we compare an artifact (B) created *without* `` then: + +$numberOfBuildpackLayers(A) \leq numberOfBuildpackLayers(B)$ + +A and B MUST be otherwise interchangeable, only differing by their number of layers. + + +# What it is +[what-it-is]: #what-it-is + +The proposal is to include a new experimental flag to the following command on Pack: + +- `pack builder create` + +The new flag will move from experimental status to supported status when maintainers deem it appropriate. + +The new flag to be included is: + +- `--flatten=` will flatten the Buildpacks specified after the `flatten` flag into a single layer. Can be used more than once, with each use resulting in a single layer. + +We also need to define how a Platform implementor needs to consume a flattened Builder. + +- When a Platform consumes a Builder, they will need to inspect each Buildpack layer blob and determine if the blob contains more than one Buildpack, in such as case, they will need to process those Buildpacks correctly. + + +# How it Works +[how-it-works]: #how-it-works + +Let's say we have a Composite Buildpack (CB1) with the following dependency tree: +```mermaid +flowchart TD + A[CB1] + A --> B[G1] + A --> C[G2] + B --> BPA[BP1] + B --> BPFOO[BP2] + B --> BPC[BP4] + C --> BPD[BP1] + C --> BPBAR[BP3] + C --> BPE[BP4] +``` + +Until now, when a Buildpack like this is being shipped into a Builder every individual Buildpack is being saved in one layer, as a result we will have: + +$$ +layer_1 = [CB_1] \\ +layer_2 = [G_1] \\ +layer_3 = [BP_1] \\ +layer_4 = [BP_2] \\ +layer_5 = [BP_4] \\ +layer_6 = [G_2] \\ +layer_7 = [BP_3] \\ +total = \text{7 layers} +$$ + +Noticed that duplicated Buildpacks are cleaned up. + +We can use the new `flatten` flag to reduce the number of Builder layers used by the buildpacks in different ways. + +* `--flatten=` i.e. 
`--flatten= --flatten=`: + Will group the given Buildpacks into one layer and keep the other ones as single layers Buildpacks, the result will be: + +```mermaid +classDiagram + class Layer1 { + CB1 + } + class Layer2 { + G1 + } + class Layer3 { + BP1 + BP2 + } + class Layer4 { + G2 + } + class Layer5 { + BP3 + BP4 + } +``` + + +$$ +total = \text{5 layers} +$$ + +--- + + +# Migration +[migration]: #migration + + +The current [distribution spec](https://github.com/buildpacks/spec/blob/main/distribution.md#buildpackage) defines: + +``` +Each buildpack layer blob MUST contain a single buildpack at the following file path: + +/cnb/buildpacks/// +``` + +A Builder flattened with this new feature would not be consumable by older platform implementations because they are not expecting to find more than one buildpack on a blob layer. + + + +# Drawbacks +[drawbacks]: #drawbacks + +Why should we *not* do this? + +It could create artifacts that are not consumable by older platforms. + + +# Alternatives +[alternatives]: #alternatives + +- What other designs have been considered? + +Some other alternatives mentioned are: squashing by the buildpack size or squashing a CNB Builder when the number of layers is reaching the limit, but those ideas, do not provide the freedom to the buildpacks authors to decide which buildpacks to flatten. + + +- Why is this proposal the best? + +Not sure if it is the best, but a way to solve the `layer limit error` is to optimize the uses of the layer in a Builder. + +- What is the impact of not doing this? + +Builder Authors and Platform Operators will keep seeing the layer limit error. + +# Prior Art +[prior-art]: #prior-art + +Discuss prior art, both the good and bad. + +--- + + + +# Spec. 
Changes (OPTIONAL) +[spec-changes]: #spec-changes + +No spec changes at this time + + + +# History +[history]: #history + +% diff --git a/text/0124-pack-manifest-list-commands.md b/text/0124-pack-manifest-list-commands.md new file mode 100644 index 000000000..639c69f8b --- /dev/null +++ b/text/0124-pack-manifest-list-commands.md @@ -0,0 +1,460 @@ +# Meta +[meta]: #meta +- Name: Manifest List Commands for Pack +- Start Date: 2023-04-19 +- Author(s): [Juan Bustamante](https://github.com/jjbustamante) +- Status: Approved +- RFC Pull Request: (leave blank) +- CNB Pull Request: (leave blank) +- CNB Issue: (leave blank) +- Supersedes: (put "N/A" unless this replaces an existing RFC, then link to that RFC) + +# Summary +[summary]: #summary + +The problem for adding support for multi-arch buildpacks can be divided into two parts: +- Support buildpack authors to **migrate their existing buildpacks** to support multi-arch . +- Support buildpack authors to **create new buildpacks and builders** that handle multi-arch from the beginning. + +This RFC proposes to create a new set of CRUD commands in pack to handle manifest lists, which will be used to support the first part of the problem. + +# Definitions +[definitions]: #definitions + +- Image Manifest: The image manifest provides a configuration and set of layers for a single container image for a specific architecture and operating system. See [spec](https://github.com/opencontainers/image-spec/blob/main/manifest.md) +- Image Index: The image index is a higher-level manifest which points to specific image manifests, ideal for one or more platforms. See [spec](https://github.com/opencontainers/image-spec/blob/main/image-index.md) + +# Motivation +[motivation]: #motivation + +- Why should we do this? + +The uses of ARM architecture in the cloud and edge computing has been growing rapidly. The CNCF community has been also growing in the last years, and there is a need to support multi-arch for all the projects. 
The buildpacks community is not an exception, issues like:
+- [It would be nice to support easily creating a manifest list packages and builders](https://github.com/buildpacks/pack/issues/1460)
+- [Provide a way to specify desired platform when creating packages and builders](https://github.com/buildpacks/pack/issues/1459)
+- [Multi arch image build support](https://github.com/buildpacks/pack/issues/1570)
+
+Or the conversations around this topic in our [slack channel](https://cloud-native.slack.com/archives/C032LNSMY0P), even the [talk at Kubecon NA 2022](https://www.youtube.com/watch?v=Sdr5axlOnDI&list=PLj6h78yzYM2O5aNpRM71NQyx3WUe1xpTn&index=76) demonstrate the interest from the community in this feature.
+
+- What use cases does it support?
+
+Currently, buildpack authors can build and package their buildpacks for different OS and Architectures, but when they distribute them the URI for a buildpack can’t disambiguate, they need to use different tags to differentiate between them. This makes it harder for users to consume those Buildpacks.
+The solution is to share a single URI for all the different OS and Architectures, and the way to do that is using a manifest list.
+
+Adding commands to support the operations to handle the manifest list will allow buildpack authors to migrate their existing buildpacks to support multi-arch, without affecting their current existing process.
+
+- What is the expected outcome?
+
+The expected outcome is to have a set of commands in pack to handle manifest lists, for example:
+- A command to create a manifest list from a set of images
+- A command to push the manifest list to a registry
+- A command to update or delete a manifest list
+
+# What it is
+[what-it-is]: #what-it-is
+
+The proposal is to add a new _experimental_ command `pack manifest` and different subcommands. The `pack manifest` commands will initially be gated behind `pack config experimental`.
The `pack manifest` commands will move from experimental status to supported status when maintainers deem it appropriate. +- `pack manifest create` will create a local manifest list for annotating and pushing to a registry +- `pack manifest annotate` will add additional information like os, arch or variant to an existing local manifest list +- `pack manifest add` will add an image to an existing manifest list +- `pack manifest remove` will delete a manifest list from local storage +- `pack manifest rm` will remove an image from a manifest list in the local storage +- `pack manifest push` will push a manifest list to a registry +- `pack manifest inspect` will show the manifest information stored in local storage + +Our target user affected by the feature is: **Buildpack Authors**. Let's see some examples of how this feature will work. + +Currently, if we check [sample-packages](https://hub.docker.com/r/cnbs/sample-package/tags) at dockerhub we will notice that we have a composed buildpack called `hello-universe` and we offer two tags to support different architectures: +- `cnbs/sample-package:hello-universe` for linux and +- `cnbs/sample-package:hello-universe-windows`. + + +Let's suppose our linux version is called `cnbs/sample-package:hello-universe-linux` to keep the same naming convention, but we will keep it as it is for simplicity. If we want to distribute the `hello-universe` buildpack for any **architecture/os/variant** combination we need to use a tool outside the CNB ecosystem to create a manifest list. 
With the proposed experimental commands on pack we can do: + +```bash +$ pack manifest create cnbs/sample-package:hello-multiarch-universe \ + cnbs/sample-package:hello-universe \ + + cnbs/sample-package:hello-universe-windows +``` + +By default, the command will create a manifest list in the local storage using the docker media types [Version 2 schema 2](https://docs.docker.com/registry/spec/manifest-v2-2/) with a content similar to: + +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 226083, + "digest": "sha256: 87a832fd6a8d6995d336c740eb6f3da015401a6e564fcbe95ee1bf37557a8225", + "platform": { + "os": "linux", + "architecture": "" + } + }, + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 226083, + "digest": "sha256:670d62fbee841d256a706801a03be9c84d37fc2cd6ef7538a7af9985c3d2ed8b", + "platform": { + "os": "windows", + "architecture": "" + } + } + ] +} +``` +The idea to save the manifest list locally is to allow the user to update the manifest before pushing it to a registry, + +in this case, we need to define the **architecture** field because it is empty in our examples. 
+ +We can use the `pack manifest annotate` command to add the architecture information: + +```bash +$ pack manifest annotate --arch amd64 cnbs/sample-package:hello-multiarch-universe cnbs/sample-package:hello-universe +$ pack manifest annotate --arch amd64 cnbs/sample-package:hello-multiarch-universe cnbs/sample-package:hello-universe-windows +``` + +After executing these commands, our local manifest list will be updated as follows: + +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 940, + "digest": "sha256:87a832fd6a8d6995d336c740eb6f3da015401a6e564fcbe95ee1bf37557a8225", + "platform": { + "architecture": "amd64", + "os": "linux" + } + }, + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 1148, + "digest": "sha256:670d62fbee841d256a706801a03be9c84d37fc2cd6ef7538a7af9985c3d2ed8b", + "platform": { + "architecture": "amd64", + "os": "windows" + } + } + ] +} +``` + +Finally, our manifest list is ready to be pushed to a registry, and we can use the `pack manifest push` command to do it: + +```bash +$ pack manifest push cnbs/sample-package:hello-multiarch-universe +``` +And our manifest list should be published at [dockerhub](https://hub.docker.com/r/cnbs/sample-package/tags) as `cnbs/sample-package:hello-multiarch-universe`, asuming that we have the proper credentials to push the image. + + + +# How it Works +[how-it-works]: #how-it-works + +The proposal is to implement an abstraction of an OCI *Image Index* and expose it to users through `pack manifest` commands. 
+ +## Image Index Abstraction + +A new high level abstraction to represent an OCI Image Index is proposed, similar to the [Image](https://github.com/buildpacks/imgutil/blob/main/image.go) interface exposed in *imgutil* repository, + +we proposed a new *ManifestList* interface to expose the behavior of an OCI Image Index. + +```mermaid +classDiagram + class ManifestList { + <> + +Add(repoName string) error + +Remove(repoName string) error + +Delete(additionalNames []string) error + +AnnotateManifest(manifestName string, opts AnnotateFields) error + +Save() error + } + + class remote_ManifestList { + +NewManifestList(repoName string, keychain authn.Keychain) ManifestList + + } + + class local_ManifestList { + +NewManifestList(repoName string, path string) ManifestList + } + + + class AnnotateFields { + +String Architecture + +String OS + +String Variant + } + + ManifestList <|-- remote_ManifestList + ManifestList <|-- local_ManifestList + +``` + +Two implementations: *remote* and *local* are proposed, *remote* will take care of implementing the interaction with an OCI registry and *local* will deal with the local storage operations. + +### Component Diagram + +Using a [C4 component diagram](https://c4model.com/#ComponentDiagram), we can define the high level interaction on pack. This design follows the same pattern for each command already implemented. + +![](https://hackmd.io/_uploads/B1PpJ-fKh.png) + +- *Image Factory*: is responsible for instantiate the *Image Index* abstraction based on the configuration require, it could be a *remote* or a *local* implementation +- *Image Index*: is the abstraction defined which exposes the operation methods we want to offer to users + - As we can see, depending on the implementation it will interact with the file system or with a remote registry + +### Considerations + +#### When a user wants to create a manifest list using a manifest outside the user's repo. 
+ + +As a pack user I want to create a manifest list `foo/my-manifest:my-tag` using a manifest outside my repository `foo` for example `other/external-manifest:latest`. + +The user invokes a command similar to: `pack manifest create foo/my-manifest:my-tag other/external-manifest:latest` + +In this case, pack will need to *copy* the external image `other/external-manifest:latest` into `foo` repository `foo/external-manifest:latest` and then uses this reference to create the manifest list. In such as case `pack` should: + - *warn* the user about this operation, for example with a message + - *ask the user for confirmation* before executing the operation + +#### When a user wants to create a manifest list referencing a manifest list + +As a pack user I want to create a manifest list using a reference to another manifest list + +The user invokes a command similar to: `pack manifest create foo/my-manifest:my-tag foo/another-manifest:latest` + +In this case, pack should: + - add into the **manifests** array of objects a reference to the manifest index using the media-type `application/vnd.oci.image.index.v1+json` (nested index) + +### Commands details + +#### Create a Manifest List + +Pack will create a manifest a local manifest, it should handle the following scenarios: +- IF user references a manifest list that already exists in a registry: In this case, pack will save a local copy of the remote manifest list, this is useful for updating (adding, updating or deleting) images +- IF user references a manifest list that doesn't exist in a registry: pack will create a local representation of the manifest list that will only save on the remote registry if the user publish it + +```bash +manifest create generates a manifest list for a multi-arch image + +Usage: + pack manifest create [ ... 
] [flags] + +Examples: +pack manifest create cnbs/sample-package:hello-multiarch-universe \ + cnbs/sample-package:hello-universe \ + cnbs/sample-package:hello-universe-windows + +Flags: + -f, --format string Format to save image index as ("OCI" or "V2S2") (default "v2s2") + --insecure Allow publishing to insecure registry + --publish Publish to registry + -r, --registry string Registry URL to publish the image index +``` + +#### Annotate (os/arch) a Manifest List + + +Sometimes a manifest list could reference an image that doesn't specify the *architecture*, for example, [check](https://hub.docker.com/r/cnbs/sample-package/tags) our sample buildpack +packages. The `annotate` command allows users to update those values before pushing the manifest list a registry + +```bash +manifest annotate modifies a manifest list (Image index) and update the platform information for an image included in the manifest list. + +Usage: + pack manifest annotate [OPTIONS] [flags] + +Examples: +pack manifest annotate cnbs/sample-package:hello-universe-multiarch \ cnbs/sample-package:hello-universe --arch amd64 + +Flags: + --arch string Set the architecture + --os string Set the operating system + --variant string Set the architecture variant +``` + +#### Add an image to a Manifest List + +When a manifest list exits locally, user can add a new image to the manifest list using this command + +```bash +manifest add modifies a manifest list (Image index) and add a new image to the list of manifests. 
+ +Usage: + pack manifest add [OPTIONS] [flags] + +Examples: +pack manifest add cnbs/sample-package:hello-multiarch-universe \ + cnbs/sample-package:hello-universe-riscv-linux + +Flags: + --all add all of the contents to the local list (applies only if is an index) + --arch string Set the architecture + --os string Set the operating system + --variant string Set the architecture variant +``` + +#### Remove an image from a Manifest List + +In the opposite case, users can remove existing images from a manifest list + +```bash +Delete one or more manifest lists from local storage + +Usage: + pack manifest remove [manifest-list] [manifest-list...] [flags] + +Examples: +pack manifest delete cnbs/sample-package:hello-multiarch-universe + +Flags: + -h, --help Help for 'remove' +``` + +#### Remove a local Manifest List + +Sometimes users can just experiment with the feature locally and they want to discard all the local information created by pack. `rm` command just delete the *local* manifest list + +```bash +manifest remove will remove the specified image manifest if it is already referenced in the index + +Usage: + pack manifest rm [manifest-list] [manifest] [flags] + +Examples: +pack manifest rm cnbs/sample-package:hello-multiarch-universe \ + cnbs/sample-package:hello-universe-windows + +Flags: + -h, --help Help for 'rm' + +``` + +#### Push a Manifest List to a remote registry + +Once a manifest list is ready to be publishe into the registry, the `push` command can be used + +```bash +manifest push pushes a manifest list (Image index) to a registry. 
+ +Usage: + pack manifest push [OPTIONS] [flags] + +Examples: +pack manifest push cnbs/sample-package:hello-multiarch-universe + +Flags: + -f, --format string Manifest list type (oci or v2s2) to use when pushing the list (default is v2s2) + --insecure Allow publishing to insecure registry + -p, --purge Delete the manifest list or image index from local storage if pushing succeeds +``` + +#### Inspect a Manifest List + +Finally, the `inspect` command will help users to view how their local manifest list looks like + +```bash +manifest inspect shows the manifest information stored in local storage + +Usage: + pack manifest inspect [flags] + +Examples: +pack manifest inspect cnbs/sample-builder:multiarch + +Flags: + -h, --help Help for 'inspect' + +``` + +One important concern for users is to inspect the content of a multi-arch builder or buildpack if they are accessible behind a manifest list. The proposal is to implement a `platform` flag for commands: + +- `pack builder inspect` +- `pack buildpack inspect` + +The `--platform` flag specifies the platform in the form os/arch[/variant][:osversion] (e.g. linux/amd64). By default it will reference the host values. + +The output of the commands should remain the same. + + +# Migration +[migration]: #migration + +This section should document breaks to public API and breaks in compatibility due to this RFC's proposed changes. In addition, it should document the proposed steps that one would need to take to work through these changes. Care should be give to include all applicable personas, such as platform developers, buildpack developers, buildpack users and consumers of buildpack images. + +# Drawbacks +[drawbacks]: #drawbacks + +Why should we *not* do this? 
We could decide not to add this feature, and users could use tools like `docker` or `podman` to create and handle their manifest lists; however, this is a poor user experience, forcing users to use different tools to fulfill scenarios that are part of the business domain of pack.
change might be new lifecycle flags, new `buildpack.toml` fields, new fields in the buildpackage label, etc. +This section is not intended to be binding, but as discussion of an RFC unfolds, if spec changes are necessary, they should be documented here. + +# History +[history]: #history + + diff --git a/text/0125-lifecycle-parallel-export.md b/text/0125-lifecycle-parallel-export.md new file mode 100644 index 000000000..0d193a8ab --- /dev/null +++ b/text/0125-lifecycle-parallel-export.md @@ -0,0 +1,213 @@ +# Meta +[meta]: #meta +- Name: Export App Image and Cache Image in Parallel +- Start Date: 2023-08-26 +- Author(s): ESWZY +- Status: Approved +- RFC Pull Request: [rfcs#291](https://github.com/buildpacks/rfcs/pull/291) +- CNB Pull Request: [lifecycle#1167](https://github.com/buildpacks/lifecycle/pull/1167) +- CNB Issue: N/A +- Supersedes: N/A + +# Summary +[summary]: #summary + +Export app image and cache image in parallel during export phase of lifecycle. In the original logic, it has to wait for the export of the app image to be completed before exporting the cache image. This will result in a period of idleness, and the network resources and I/O resources will not be fully utilized, resulting in a longer waiting time for the overall export, and also a longer overall build time. By parallelizing export phase, network and I/O resources can be used to the maximum, thereby saving time. + +# Definitions +[definitions]: #definitions + +* lifecycle: software that orchestrates a CNB build. +* Cache Image: The cache stored between builds - stored in a registry. + +# Motivation +[motivation]: #motivation + +In some scenario, the app image and the cache image need to be exported at the same time, but this process is serial in the lifecycle, which means that after the app image is exported, we have to wait for the cache image to be exported. 
But we don’t need to wait for the export of cache image, only after the app is exported, we can continue to next steps (distribution and deployment). + +So we can try to parallelize this step ([lifecycle#1167](https://github.com/buildpacks/lifecycle/pull/1167)) and compare it with serial exporting. After testing the build on some applications, this modification can shorten the export time. + +- Java (app image is 202.361MB, cache image is 157.525MB, with one same layer: 107.648MB): + - Before: total 18.34s, app 8.96s, cache 9.38s + - After: total 14.70s, app 11.42s, cache 13.93s + - app image layers: 0+1.103MB+15.153MB+107.648MB+49.953MB+0+0+28.502MB + - cache image layers: 9.411MB+40.465MB+107.648MB + +- Go (app image is 114.273MB, cache is 175.833MB, no same layer): + - Before: total 16.57s, app 5.92s, cache 10.65s + - After: total 12.02s, app 7.31s, cache 11.48s + - app image layers: 0+1MB+25.72MB+8.993MB+49.953MB+0+0+28.502MB + - cache image layers: 70.87MB+104.964MB + +# What it is +[what-it-is]: #what-it-is + +The proposal is to add a new capability to the lifecycle (enabled by configuration) to export app image and cache image to registry in parallel. + +The target personas affected by this change are: + + - **buildpack user**: they will experience higher disk I/O or network pressure if this feature is enabled by default. + - **Platform implementors**: they will choose parallel or serial export, to suit how the platform works. Serial export helps to get app image faster, while parallel export can complete the build process faster. + +This proposal, in addition to acceleration, has the greatest impact on the export time of app image. This will lead to resource competition, that is, when the app image and cache image are exported at the same time, the app image export time will become longer. 
For some users, they only care about the export time of the app image, but not the overall optimization effect, and enabling this capability will affect their performance. Therefore, this ability is optional. + +The flag name, refer to other boolean flag like `daemon`, `layout`, I think it can be named `parallel`. And then the usage is just like this: +``` +/cnb/lifecycle/exporter \ + [-analyzed ] \ + [-app ] \ + [-cache-dir ] \ + [-cache-image ] \ + [-daemon] \ # sets + [-extended ] \ + [-gid ] \ + [-group ] \ + [-launch-cache ] \ + [-launcher ] \ + [-launcher-sbom ] \ + [-layers ] \ + [-layout] \ # sets + [-layout-dir] \ # sets + [-log-level ] \ + [-parallel] \ # sets + [-process-type ] \ + [-project-metadata ] \ + [-report ] \ + [-run ] \ + [-uid ] \ + [...] +``` + +# How it Works +[how-it-works]: #how-it-works + +It will be done using goroutines. Goroutine is a lightweight multi-threading mechanism, which can avoid a lot of extra overhead caused by the multi-threaded parallel operation. This function encapsulates the export process of the app image and the cache image into two goroutines to execute in parallel. + +The working principle is shown in the following code (go-like pseudocode). Execute in parallel through two goroutines and wait for all export processes to finish. If parallel export is not enabled, the process is exactly the same as the original serial export. + +```go +func export() { + exporter := &lifecycle.Exporter{} + // ... + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + exporter.Export() + }() + + if !enableParallelExport { + wg.Wait() + } + + wg.Add(1) + go func() { + defer wg.Done() + exporter.Cache() + }() + + wg.Wait() + + // ... +} +``` + +## Examples + +For command line use, control this process through the `parallel` flag. 
+ +### Export both app image and cache image + +By specifying environment variable `CNB_PARALLEL_EXPORT`, or pass a `-parallel` flag, images will be pushed to `cr1.example.com` and `cr2.example.com` simultaneously. + +```shell +> export CNB_PARALLEL_EXPORT=true +> /cnb/lifecycle/exporter -app cr1.example.com/foo:app -cache-image cr2.example.com/foo:cache + +# OR + +> /cnb/lifecycle/exporter -app cr1.example.com/foo:app -cache-image cr2.example.com/foo:cache -parallel +``` + +### Export app image only or export cache image only + +If export one image, the effect of this function is not very obvious. + +```shell +> export CNB_PARALLEL_EXPORT=true +> /cnb/lifecycle/exporter -app cr1.example.com/foo:app +[debug] Parsing inputs... +[warn] parallel export has been enabled, but it has not taken effect because cache image (-cache-image) has not been specified. + +# OR + +> /cnb/lifecycle/exporter -app cr1.example.com/foo:app -parallel +[debug] Parsing inputs... +[warn] parallel export has been enabled, but it has not taken effect because cache image (-cache-image) has not been specified. + +# EQUAL TO + +> /cnb/lifecycle/exporter -app cr1.example.com/foo:app +``` + +# Migration +[migration]: #migration + +We maybe need to add a new API option for buildpack users, to choose whether this feature should be enabled. + +# Drawbacks +[drawbacks]: #drawbacks + + - This will lead to resource competition, the app image export time will become longer. + +# Alternatives +[alternatives]: #alternatives + +N/A. + +# Prior Art +[prior-art]: #prior-art + +N/A. + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +- How to allow users to choose the export method? environment variable? Or a parameter of the creator? +- Does it also need to be specified in the pack tool? +- Should this feature be enabled by default? + +# Spec. 
Changes +[spec-changes]: #spec-changes + +This new feature will affect the API of [Create](https://buildpacks.io/docs/concepts/components/lifecycle/create/) and [Export](https://buildpacks.io/docs/concepts/components/lifecycle/export/) phases, by adding the following fields. + +Back to API changes, we will add a new flag to control this. + +| Input | Environment Variable | DefaultValue | Description | +|--------------|-----------------------|--------------|----------------------------------------------| +| `` | `CNB_PARALLEL_EXPORT` | `false` | Export app image and cache image in parallel | + + +# History +[history]: #history + + \ No newline at end of file diff --git a/text/0126-creator-skip-sbom.md b/text/0126-creator-skip-sbom.md new file mode 100644 index 000000000..eebd2b1ad --- /dev/null +++ b/text/0126-creator-skip-sbom.md @@ -0,0 +1,131 @@ +# Meta +[meta]: #meta +- Name: Enable CNB_SKIP_SBOM IN /cnb/lifecycle/creator +- Start Date: (fill in today's date: 2023-10-17) +- Author(s): kritkasahni-google +- Status: Approved +- RFC Pull Request: +- CNB Pull Request: +- CNB Issue: +- Supersedes: N/A + +# Summary +[summary]: #summary + +Enable CNB_SKIP_SBOM IN /cnb/lifecycle/creator to skip restoring SBOM layer from previous app image. We support CNB_SKIP_LAYERS in analyzer which does the same thing and we should support the same in creator also. + +# Definitions +[definitions]: #definitions +* lifecycle: software that orchestrates a CNB build. +* creator: executes all the lifecycle phases one by one in order. +* analyzer: lifecycle phase that restores SBOM layer from previous app image. +* restorer: lifecycle phase that restores layers from cache. +* SBOM: a software bill of materials (SBOM) is a list of all the components that make up the app image. + +# Motivation +[motivation]: #motivation + +To skip restoring SBOM layer from previous image when platform executes lifecycle by calling /cnb/lifecycle/creator. 
Restoring SBOM layer from previous app image can cause degraded build latency but if buildpack logic does not rely on SBOM from previous app image then should be able to skip restoring it. + +# What it is +[what-it-is]: #what-it-is + +CNB_SKIP_LAYERS is used by /cnb/lifecycle/analyzer to skip restoring SBOM layer from previous app image. +Need a similar mechanism for /cnb/lifecyle/creator specifically to skip restoring only the SBOM layer. + +The target personas affected by this change are: + + - **buildpack user**: if buildpacks don't rely on reusing SBOM layer then buildpack user should ideally see improved build latency by skipping SBOM restoration but reusing other layers from previous app image. + - **Platform implementors**: they will choose to skip restoring SBOM by providing CNB_SKIP_SBOM to trigger /cnb/lifecycle/creator. + + +# How it Works +[how-it-works]: #how-it-works + +Similar to how CNB_SKIP_LAYERS is handled in analyzer whether SBOM needs to be [restored](https://github.com/buildpacks/lifecycle/blob/292aa492a72f4e180bb92d109a73ebf7c8a0451d/phase/analyzer.go#L38) or [not](https://github.com/buildpacks/lifecycle/blob/292aa492a72f4e180bb92d109a73ebf7c8a0451d/phase/analyzer.go#L30) today, CNB_SKIP_SBOM will be be handled in same way in analyzer. +At the platform level, it would be input same way as CNB_SKIP_LAYERS [here](https://github.com/buildpacks/lifecycle/blob/292aa492a72f4e180bb92d109a73ebf7c8a0451d/platform/defaults.go#L184) and [handled](https://github.com/buildpacks/lifecycle/blob/main/platform/lifecycle_inputs.go#L82) like:- + + +``` + var skipSBOM bool + if boolEnv(EnvSkipSBOM){ + skipSBOM = true + } +``` + +In the analyzer, + +``` +analyzer := &Analyzer{ + Logger: logger, + SBOMRestorer: &layer.NopSBOMRestorer{}, + PlatformAPI: f.platformAPI, + } + + ... 
+ if f.platformAPI.AtLeast("0.8") && !inputs.SkipLayers && !inputs.SkipSBOM { + analyzer.SBOMRestorer = &layer.DefaultSBOMRestorer{ + LayersDir: inputs.LayersDir, + Logger: logger, + } + } +``` + +# Migration +[migration]: #migration + +CNB_SKIP_SBOM/ will be an optional input to /cnb/lifecycle/creator, and will be false by default. We maybe need to add a new API option for buildpack users, to choose whether this should be enabled. + +# Drawbacks +[drawbacks]: #drawbacks + +N/A + +# Alternatives +[alternatives]: #alternatives + +Platforms that execute lifecycle today via /cnb/lifecycle/creator are unable to skip restoring SBOM layer from previous app image unless they skip reusing previous app image entirely. + +# Prior Art +[prior-art]: #prior-art + +We already support enabling CNB_SKIP_LAYERS in /cnb/lifecycle/analyzer and /cnb/lifecycle/restorer, and CNB_SKIP_RESTORE in /cnb/lifecycle/creator. +* CNB_SKIP_LAYERS in /cnb/lifecycle/analyzer to skip restoring SBOM from previous app image. +* CNB_SKIP_LAYERS in /cnb/lifecycle/restorer to skip reusing previous app image layers entirely. +* CNB_SKIP_RESTORE in /cnb/lifecycle/creator to skips restoring SBOM plus all other layers entirely from previous app image. + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +N/A + +# Spec. Changes (OPTIONAL) +[spec-changes]: #spec-changes +This new feature will affect the API of [Create](https://buildpacks.io/docs/concepts/components/lifecycle/create/) phase by adding the following fields. + +Back to API changes, we will add a new flag to control this. 
+ +| Input | Environment Variable | DefaultValue | Description | +|----------------|-----------------------|--------------|----------------------------------------------| +| `` | `CNB_SKIP_SBOM` | `false` | Skip SBOM restoration | + +# History +[history]: #history + + diff --git a/text/0127-extension-layer.md b/text/0127-extension-layer.md new file mode 100644 index 000000000..55f033e66 --- /dev/null +++ b/text/0127-extension-layer.md @@ -0,0 +1,131 @@ +# Meta +[meta]: #meta +- Name: Add extension layer to exchange data +- Start Date: 2023-10-09 +- Author(s): [c0d1ngm0nk3y](https://github.com/c0d1ngm0nk3y), [pbusko](https://github.com/pbusko) +- Status: Approved +- RFC Pull Request: +- CNB Pull Request: +- CNB Issue: +- Related: [RFC#0105 Support Dockerfiles](https://github.com/buildpacks/rfcs/blob/main/text/0105-dockerfiles.md) +- Supersedes: N/A + +# Summary +[summary]: #summary + +This RFC introduces support for Extension configurable context to allow data transfer between the build environment and the Kaniko execution. + +# Motivation +[motivation]: #motivation + +This change allows extensions to create their own context for the extend phase during the generation phase. Additionally, it ensures that extension output does not inadvertently interfere with other extension or buildpack layers during the build, and it does not unintentionally become part of the final application image. + +This would allow distroless run images to be extended. + +# What it is +[what-it-is]: #what-it-is + +This follows up on RFC-0105 and proposes that during the execution of the extension's `./bin/generate`, an extension is allowed to write arbitrary data to the `context` folder within its exclusive output directory. This data then becomes accessible during the execution of the `extend` phase via Kaniko build context. The content of these extension-specific context is ignored at build and launch time, it serves only the extension phase. 
+ +# How it Works +[how-it-works]: #how-it-works + +- Before execution of the `./bin/generate`, the lifecycle will create a distinct writable layer `$CNB_GENERATED_DIR/` for each extension which passed detection. +- The `$CNB_GENERATED_DIR/` is provided to the `./bin/generate` as `` (`$CNB_OUTPUT_DIR`) directory. +- In addition to the files specified in [RFC#0105](https://github.com/buildpacks/rfcs/blob/main/text/0105-dockerfiles.md), the extension may create the following folders with an arbitrary content: + + either: + + - `/context` + + or the image-specific folders: + + - `/context.run` + - `/context.build` + + If the `/context` is provided together with any of the image-specific folders the detection phase must fail. +- If the folder `/context` is present it will be set as Kaniko build context during the `extend` phase of the build and run images. +- If the folder `/context.run` is present it will be set as Kaniko build context during the `extend` phase of the run image only. +- If the folder `/context.build` is present it will be set as Kaniko build context during the `extend` phase of the build image only. +- If none of these folders is not present, Kaniko build context defaults to the `` folder. + +The `$CNB_GENERATED_DIR/` folders will not be included in the final image by the lifecycle. + +### Example: Extend distroless run image with Debian packages. + +This example extension would allow to install `tar` package on the run image without package manager (distroless image). The extension contains `./bin/generate` and `./bin/custom-installer` file, which installs `.deb` files. 
+ +##### `./bin/generate` + +```bash +#!/bin/sh + +mkdir -p ${CNB_OUTPUT_DIR}/context.run + +cp ${CNB_EXTENSION_DIR}/bin/custom-installer ${CNB_OUTPUT_DIR}/context.run/ +curl -o ${CNB_OUTPUT_DIR}/context.run/tar.deb http://security.ubuntu.com/ubuntu/pool/main/t/tar/tar_1.34+dfsg-1ubuntu0.1.22.04.1_amd64.deb + +cat >> "${CNB_OUTPUT_DIR}/run.Dockerfile" <` folder instead of a temporary directory. +- allow optional folders `$CNB_GENERATED_DIR//context`, `$CNB_GENERATED_DIR//context.run` and `$CNB_GENERATED_DIR//context.build` with an arbitrary content to be provided by extension. +- if the context folders are present, kaniko context should be set to the corresponding folder instead of the `` (following the rules defined in [#how-it-works](#how-it-works)). + + \ No newline at end of file diff --git a/text/0128-multiarch-builders-and-package.md b/text/0128-multiarch-builders-and-package.md new file mode 100644 index 000000000..e36d1d2bc --- /dev/null +++ b/text/0128-multiarch-builders-and-package.md @@ -0,0 +1,1090 @@ +# Meta +[meta]: #meta +- Name: Multi-platform support for builders and buildpack packages +- Start Date: 2023-09-14 +- Author(s): @jjbustamante +- Status: Approved +- RFC Pull Request: [rfcs#295](https://github.com/buildpacks/rfcs/pull/295) +- CNB Pull Request: (leave blank) +- CNB Issue: N/A +- Supersedes: (put "N/A" unless this replaces an existing RFC, then link to that RFC) + +# Summary +[summary]: #summary + +The problem for adding support for multi-platform buildpacks can be divided into three parts: +1. Support buildpack authors to **migrate their existing buildpacks** to support multiple operating systems, architectures, variants and distros. +2. Support buildpack authors to **create new buildpacks and builders** that handle multi-arch from the beginning. +3. Support application developers to **create application images** using multi-arch buildpacks and builders. 
+ +The purpose of this RFC is to solve the statement 2, adding the capability to the commands: + +- `pack buildpack package` +- `pack builder create` + +to create individuals OCI images artifacts per each os and arch (builders and buildpack packages) and handle the creation for the [image index,](https://github.com/opencontainers/image-spec/blob/master/image-index.md) +that combines them into one single consumable tag for end-users. + +# Definitions +[definitions]: #definitions + +- Buildpack: A buildpack is a set of executables that inspects your app source code and creates a plan to build and run your application. +- Builder: A builder is an image that contains all the components necessary to execute a build. A builder image is created by taking a build image and adding a lifecycle, buildpacks, and files that configure aspects of the build including the buildpack detection order and the location(s) of the run image +- Image Manifest: The image manifest provides a configuration and set of layers for a single container image for a specific architecture and operating system. See [spec](https://github.com/opencontainers/image-spec/blob/main/manifest.md) +- Image Index: The image index is a higher-level manifest which points to specific image manifests, ideal for one or more platforms. See [spec](https://github.com/opencontainers/image-spec/blob/main/image-index.md) +- Buildpack root folder: is the top-most directory where a `buildpack.toml` can be found +- Platform root folder: Based on our new folder structure, the **platform root folder** is the top-most directory that identifies a target in **buildpack root folder** + For example: + - given a target `linux/amd64` the **platform root folder** will be `/linux/amd64` + - given a target `windows/amd64:windows@10.0.20348.1970` the **platform root folder** will be `/windows/amd64/windows@10.0.20348.1970` + +# Motivation +[motivation]: #motivation + +- Why should we do this? 
+ +The uses of ARM architecture in the cloud and edge computing has been growing rapidly. The CNCF community has been also growing in the last years, and there is a need to support multi-arch for all the projects. The buildpacks community is not an exception, issues like: +- [It would be nice to support easily creating a manifest list packages and builders](https://github.com/buildpacks/pack/issues/1460) +- [Provide a way to specify desired platform when creating packages and builders](https://github.com/buildpacks/pack/issues/1459) +- [Multi arch image build support](https://github.com/buildpacks/pack/issues/1570) + +Or the conversations around this topic in our [slack channel](https://cloud-native.slack.com/archives/C032LNSMY0P), even the [talk at Kubecon NA 2022](https://www.youtube.com/watch?v=Sdr5axlOnDI&list=PLj6h78yzYM2O5aNpRM71NQyx3WUe1xpTn&index=76) demonstrate the interest from the community in this feature. + +- What use cases does it support? + +Currently, buildpack authors can build and package their buildpacks for different OS and Architectures, but when they distribute them the URI for a buildpack can’t disambiguate, +they need to use different tags to differentiate between them. Tools like `docker buildx imagetools create` helps to create an [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) to combine them but increasing their build pipelines complexity. +For example, take a look at this recent blog [post](https://deploy-preview-53--elegant-borg-5bd068.netlify.app/blog/steps-we-took-for-a-basic-arm64-support-in-buildpacks) or the [instructions](https://github.com/dmikusa/paketo-arm64/) created from @dmikusa to build an ARM64 builder. 
+ +For those buildpack authors that are using `cross-compile` languages like [go](https://go.dev/) or [rust](https://www.rust-lang.org/) or maybe bash scripts, adding the capability to `pack buildpack package` and `pack builder create` to create multi-arch images +and also handle the creation of an [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) will simplify their CI/CD pipelines and make the experience more suitable. + +- What is the expected outcome? + +`pack buildpack package` and `pack builder create` commands will be updated in a way that they will handle the creation of multi-arch OCI images + +# What it is +[what-it-is]: #what-it-is + +The end-users for this proposal are **Buildpack authors**, we expect to improve their user experience when creating multi-architecture buildpacks and builders as follows + +## Multi-arch example + +Let's suppose a **Buildpack author** has a `buildpack.toml` updated to include `targets` as follows: + +```toml +# Buildpack API version +api = "0.12" + +# Buildpack ID and metadata +[buildpack] + id = "examples/my-multiarch-buildpack" + version = "0.0.1" + +# List of targets operating systems, architectures and versions + +[[targets]] +os = "linux" +arch = "amd64" + +[[targets]] +os = "linux" +arch = "arm64" + +[[targets]] +os = "windows" +arch = "amd64" + +[[targets.distros]] +name = "windows" +version = "10.0.20348.1970" + +# Stacks (deprecated) the buildpack will work with +[[stacks]] +id = "*" +``` + +And organizes the binaries according to their os/arch with a structure similar to this one: + +```bash +my-multiarch-buildpack +. +├── buildpack.toml +├── linux +│ ├── amd64 +│ │ └── bin +│ │ ├── build +│ │ └── detect +│ └── arm64 +│ └── bin +│ ├── build +│ └── detect +└── windows + └── amd64 + └── windows@10.0.20348.1970 + └── bin + ├── build.bat + └── detect.bat +``` + +Now `pack` will be able to package them separately for each os/arch family. 
+ +Following our [guide](https://buildpacks.io/docs/buildpack-author-guide/package-a-buildpack/) we will need to create a +`package.toml` let's suppose it looks like this: + +```toml +[buildpack] +uri = "examples/my-multiarch-buildpack" +# OR a .tgz with the previous folder structure +uri = "my-multiarch-buildpack.tgz" +``` + +In this case we **remove** the [platform](https://buildpacks.io/docs/reference/config/package-config/) section because it will be taken from the `buildpack.toml`. + +Packaging a multi-arch buildpack will require the output to be **publish** to a registry or **saved on disk** in OCI layout format. + +```bash +pack buildpack package my-buildpack --config ./package.toml --publish +# Or +pack buildpack package my-buildpack.cnb --config ./package.toml --format file +``` + +> **Important** +> pack will determine a multi-arch buildpack package is being created because there are more than one target defined. + +Each `target` entry corresponds to a different buildpack image that is exported into an [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) + +### Package a multi-arch Builder + +In case of packing a **Builder**, we assume the following premises: + +1. Buildpack authors updated their `builder.toml` to include the new `targets` fields defined in this [RFC](https://github.com/buildpacks/rfcs/blob/main/text/0096-remove-stacks-mixins.md). +2. Multi-architecture `build`, `run` images and `buildpacks` are available for baking into the **Builder**. 
+ +A sample `builder.toml` file looks like: + +```toml +# Buildpacks to include in builder, these buildpacks MUST be multi-arch and point to Image Index +[[buildpacks]] +uri = "" + +[run] +# Runtime images - in case of multi-arch images it must point to Image Index +[[run.images]] +image = "index.docker.io/paketobuildpacks/run-jammy-tiny:latest" + +[build] +# This image is used at build-time, in case of multi-arch images it must point to Image Index +image = "docker.io/paketobuildpacks/build-jammy-tiny:0.2.3" + +[[targets]] +os = "linux" +arch = "amd64" + +[[targets]] +os = "linux" +arch = "arm64" + +# Stack (deprecated) +[stack] +id = "io.buildpacks.stacks.jammy.tiny" +# This image is used at build-time +build-image = "docker.io/paketobuildpacks/build-jammy-tiny:0.2.3" +# This image is used at runtime +run-image = "index.docker.io/paketobuildpacks/run-jammy-tiny:latest" +``` + +As we can see, the proposal is based on the assumption that the `run-image`, `build-image` and `buildpacks` to include +in the builder are **multi-arch artifacts**, and we can reach them by reading an +[image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) + +Packaging a multi-arch builder will require the output to be **publish** to a registry. + +```bash +pack builder create my-jammy-builder --config ./builder.toml --publish +``` +> **Important** +> Similar to the `buildpack package,` pack will determine a multi-arch builder must be created based on the multiple targets defined. 
+ +In this case `pack` will follow the builder creation process for **each provided target**, +pulling the correct (based on os/arch) buildpacks, build and run images and creating different builder images that are +exported and combined into an [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) + +# How it Works +[how-it-works]: #how-it-works + +## Buildpack Package + +As a quick summary, our current process to create a buildpack package involves: + +- The end-users define the `os` for the OCI image using the [package.toml](https://buildpacks.io/docs/reference/config/package-config/). +- The only values allowed are `linux` and `windows` and by default when it is not present, `linux` is being used. +- When exporting to daemon, the `docker.OSType` must be equal to `platform.os` +- When building a single buildpack package, `package.toml` is optional + +### To keep compatibility + +We propose: +- Deprecate the `platform.os` field from [package.toml](https://buildpacks.io/docs/reference/config/package-config/). 
It will be removed after two pack releases with the new feature +- When `platform.os` is present in [package.toml](https://buildpacks.io/docs/reference/config/package-config/), throw a warning messages indicating the field will be removed +and `--target` flag must be used +- When `platform.os` is not present in [package.toml](https://buildpacks.io/docs/reference/config/package-config/) and `--target` flag is not used, throw a warning messages indicating +a new `--target` flag is available and how to use it, or some helpful information on how to add `targets` to the `buildpack.toml` +- Keep doing our current process to package a buildpack + +### To improve user experience + +We propose: +- Add a new `--target` flag using the format `[os][/arch][/variant]:[name@version]` to build for a particular target, once the `platform.os` field is removed, +this will be the way for end-users to specify the platform for which they want to create single OCI artifact. + +- Add `targets` section to `buildpack.toml` to help Buildpack Authors to include support for new platforms without having to update their `pack buildpack package` command in their CI/CD pipelines + +- A new folder structure to organize the buildpacks binaries for multi-platform images, similar to this one: +```bash +# Option 1 - no variant is required +. +├── buildpack.toml // mandatory +└── {os} // optional + └── {arch} // optional (becomes the platform root folder) + └── bin + ├── build // platform dependent binary (mandatory) + └── detect // platform dependent binary (mandatory) + +# Option 2 - variant is required +. 
+├── buildpack.toml // mandatory +└── {os} // optional + └── {arch} // optional + └── {variant} // optional + ├── {name@version-1} // optional (becomes the platform root folder) + │ └── bin + │ ├── build // platform dependent binary (mandatory) + │ └── detect // platform dependent binary (mandatory) + └── {name@version-2} // optional (becomes the platform root folder) + └── bin + ├── build // platform dependent binary (mandatory) + └── detect // platform dependent binary (mandatory) +``` +- `buildpack.toml` file MUST be present in the **buildpack root folder** +- For each platform, Buildpack Authors are responsible for copying or creating symlink or hard link for files into each **platform root folder** + +> **Note** +> For cross-compile buildpacks like Paketo, it looks easy to add a step to their Makefile to compile and separate the binaries following this structure. It is important to mention +> that the final buildpack image will not change, this will only change the buildpack structure from `pack` perspective + +In case this folder structure is not suitable for Buildpack Authors, **we propose** a new `path` attribute to be included +in the `targets` section of the `buildpack.toml`, to specify where the **buildpack root directory** is located in the filesystem. 
+ +Based on the [RFC-0096](https://github.com/buildpacks/rfcs/blob/main/text/0096-remove-stacks-mixins.md) the new `buildpack.toml` schema will look like this: + +```toml +[[targets]] +os = "" +arch = "" +variant = "" +# optional +path = "" + +[[targets.distros]] +name = "" +version = "" +``` +- When `more than 1 target is defined` + - When `--publish` is specified + - For each `target` an OCI image will be created, following these rules + - `pack` will determine the **platform root folder**, this is the specific root folder for a given `target` (based on the `targets.path` in the buildpack.toml or inferring it from a folder structure similar to the one shown above) + - `pack` will execute the current process to create a buildpack package using the **platform root folder** and the `target` values + - If more than 1 OCI image was created, an [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) will be created to combine them + - When `--format file` is specified AND `` is the expected name for the `.cnb` file + - For each `target` an OCI layout file will be created, following these rules + - `pack` will determine the **platform root folder**, this is the specific root folder for a given `target` (based on the `targets.path` in the buildpack.toml or inferring it from a folder structure similar to the one shown above) + - `pack` will execute the current process to create a buildpack package (.cnb file) using the **platform root folder** and the `target` values + - `pack` will save on disk the `.cnb` file with a name `-[os][-arch][-variant]-[name@version].cnb` + - When `--daemon` is specified + - `pack` can keep using `docker.OSType` to determine the target `os` and probably can do some validations if the `os` is a valid target + +### Examples + +Let's use some examples to explain the expected behavior in different use cases + +#### How to determine the platform root folder + +Let's suppose the Buildpack Author creates a multi-platform 
folder structure and wants to create multiple buildpack packages + +```bash +. +├── buildpack.toml +└── linux + ├── amd64 + │ └── bin + │ ├── build + │ ├── detect + │ └── foo + └── arm64 + ├── foo + └── bin + ├── build + ├── detect + └── bar +``` + +- When `linux/amd64` the **platform root folder** determined is `/linux/amd64`, and the expected +folder structure in the OCI image for each buildpack package will be: + +```bash + +. +└── cnb + └── buildpacks + └── {ID} + └── {version} + ├── bin + │ ├── build + │ ├── detect + │ └── foo // specific platform binary + └── buildpack.toml +``` + +On the other hand, When target is `linux/arm64`, the **platform root folder** determined is `/linux/arm64` +and the output OCI image folder structure looks like: +```bash +. +└── cnb + └── buildpacks + └── {ID} + └── {version} + ├── bin + │ ├── bar // specific platform binary + │ ├── build + │ └── detect + ├── buildpack.toml + └── foo +``` + +#### Buildpacks authors do not use targets or the new folder structure + +This seems to be the case for [Paketo Buildpacks](https://github.com/paketo-buildpacks/maven) +or [Heroku](https://github.com/heroku/buildpacks-jvm/tree/main/buildpacks/maven), and it represents how `pack` +will work for most the users when new behavior is implemented + +A simplified version of Buildpack Authors folder structures is: + +```bash +├── bin +│ ├── build +│ └── detect +├── buildpack.toml +└── package.toml +``` + +In these cases: We expect `pack` to keep doing what is doing today, but with the warning messages we mentioned above to +let end users know things are changing. 
+ +```bash +pack buildpack package --config ./package.toml --publish +Warning: A new '--target' flag is available to set the platform for the buildpack package, using 'linux' as default +Successfully published package and saved to registry + +# Or +pack buildpack package --config ./package.toml --format file +Warning: A new '--target' flag is available to set the platform for the buildpack package, using 'linux' as default +Successfully created package and saved to file + ``` +**Output**: pack will create a buildpack package image (as it is doing it today) with the provided binaries and a +[configuration](https://github.com/opencontainers/image-spec/blob/main/config.md#properties) with the following target +platform: + +```json +{ + "architecture": "", + "os": "linux" +} +``` + +After checking the **warning** messages, some end users must feel curious, and the try to use the new `--target` flag. + +```bash +pack buildpack package --config ./package.toml --publish --target linux/arm64 +Successfully published package and saved to registry + +# Or +pack buildpack package --config ./package.toml --format file --target linux/arm64 +Successfully created package and saved to file +``` + +**Output**: In these cases, pack will create buildpack a package image with the provided binaries and a +[configuration](https://github.com/opencontainers/image-spec/blob/main/config.md#properties) with the following target +platform: + +```json +{ + "architecture": "arm64", + "os": "linux" +} +``` + +> **Important** +> Pack will assume the binaries are appropriate for the given target platform, what the flag is doing is expose a mechanism +> to update the metadata present in the OCI config file + +what about creating a multi-platform image for several targets? 
+ +```bash +pack buildpack package --config ./package.toml --publish --target linux/arm64 --target linux/amd64 +A multi-platform buildpack package will be created for: 'linux/amd64', 'linux/arm64' +Successfully published package and saved to registry + +# Or +pack buildpack package --config ./package.toml --format file --target linux/arm64 --target linux/amd64 +A multi-arch buildpack package will be created for target platforms: 'linux/amd64', 'linux/arm64' +Successfully created package and saved to file +``` + +**Output**: two OCI images, with the same binaries, will be created and pushed into the registry, for each image the +configuration file will be created with the correct `os` and `architecture` and an +[image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) will be created to combine them +using the `` name provided. The content of the [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) +will be similar to: + +```json + +{ + "manifests": [ + { + "digest": "sha256:b492494d8e0113c4ad3fe4528a4b5ff89faa5331f7d52c5c138196f69ce176a6", + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "platform": { + "architecture": "amd64", + "os": "linux" + }, + "size": 424 + }, + { + "digest": "sha256:2589fe6bcf90466564741ae0d8309d1323f33b6ec8a5d401a62d0b256bcc3c37", + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "platform": { + "architecture": "arm64", + "os": "linux" + }, + "size": 424 + } + ], + "mediaType": "application/vnd.oci.image.index.v1+json", + "schemaVersion": 2 +} +``` + +#### Buildpacks authors do not use targets AND `platform.os` is present at `package.toml` + +Let's suppose the `package.toml` has the following: + +```toml +[buildpack] +uri = "" + +[platform] +os = "linux" +``` +These cases are similar to the previous one, but the warning message will be changed. 
+ +```bash +pack buildpack package --config ./package.toml --publish +Warning: 'platform.os' field in package.toml will be deprecated, use new '--target' flag or `targets` field in buildpack.toml to set the platform. +Successfully published package and saved to registry + +# Or +pack buildpack package --config ./package.toml --format file +Warning: 'platform.os' field in package.toml will be deprecated, use new '--target' flag or `targets` field in buildpack.toml to set the platform. +Successfully created package and saved to file +``` +**Output**: The OCI Image [configuration](https://github.com/opencontainers/image-spec/blob/main/config.md#properties) file will have: + +```json +{ + "architecture": "", + "os": "linux" +} +``` + +Trying to use `--target` flag with `platform.os` field at the same time should throw an error, in this way, the end-user will need to update +their `package.toml` + +```bash +pack buildpack package --config ./package.toml --publish --target linux/arm64 +# Or +pack buildpack package --config ./package.toml --format file --target linux/arm64 + +Error: 'platform.os' and '--target' flag can not be used in conjunction, please remove 'platform.os' from package.toml, use new '--target' flag + or `targets` field in buildpack.toml to set the platform +``` + +#### Buildpacks authors use targets + +> **Important** +> `pack` considers the use of `targets` as an acknowledgement of expecting multi-arch images as output. Also, we expect +> `platform.os` not to be present in `package.toml` + +We can divide the problem in two main scenarios: Buildpack authors use or do not use the new folder structure. + +##### New folder structure is not used + +Let's start with the first one, which is the natural path for Buildpack Authors that are using `bash` buildpacks. 
+Let's suppose a buildpack folder structure like: + +```bash +├── bin +│ ├── build +│ └── detect +└── buildpack.toml +``` + +And a `buildpack.toml` with `targets` defined as: + +```toml +[[targets]] +os = "linux" +arch = "amd64" + +[[targets]] +os = "linux" +arch = "arm64" + +# Stacks (deprecated) the buildpack will work with +[[stacks]] +id = "*" +``` + +```bash +pack buildpack package --config ./package.toml --publish +A multi-arch buildpack package will be created for target platforms: 'linux/amd64', 'linux/arm64' +Successfully published package and saved to registry + +# Or +pack buildpack package --config ./package.toml --format file +A multi-arch buildpack package will be created for target platforms: 'linux/amd64', 'linux/arm64' +Successfully created package and saved to file +``` + +**Output**: In this case, two OCI images will be created and pushed into the registry, for each image the configuration file will be +created with the correct `os` and `architecture` and +an [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) will be created to combine them, +with a content similar to: + +```json + +{ + "manifests": [ + { + "digest": "sha256:b492494d8e0113c4ad3fe4528a4b5ff89faa5331f7d52c5c138196f69ce176a6", + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "platform": { + "architecture": "amd64", + "os": "linux" + }, + "size": 424 + }, + { + "digest": "sha256:2589fe6bcf90466564741ae0d8309d1323f33b6ec8a5d401a62d0b256bcc3c37", + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "platform": { + "architecture": "arm", + "os": "linux" + }, + "size": 424 + } + ], + "mediaType": "application/vnd.oci.image.index.v1+json", + "schemaVersion": 2 +} +``` + +On the other hand, when end-users use the new `--target` flag, they can create a single OCI artifact + +```bash +pack buildpack package --config ./package.toml --publish --target linux/arm64 +Successfully published package and saved to registry +``` + 
+ +**Output**: The OCI Image [configuration](https://github.com/opencontainers/image-spec/blob/main/config.md#properties) file will have: + +```json +{ + "architecture": "arm64", + "os": "linux" +} +``` + +In case of targeting the daemon, pack will match **daemon os/arch** with the **targets os/arch**, for example when running +on a `linux/arm64` machine. + +```bash +pack buildpack package --config ./package.toml +Successfully created package and saved to docker daemon +``` + +**Output**: pack will create a buildpack package image with the provided binaries and a +[configuration](https://github.com/opencontainers/image-spec/blob/main/config.md#properties) with the following target +platform: + +```json +{ + "architecture": "arm64", + "os": "linux" +} +``` + +But, if we execute the same command on a **windows/amd64** machine, the `buildpack.toml` doesn't contain any `target` that +matches the **daemon os/arch**, an error must be thrown +```bash +pack buildpack package --config ./package.toml +Error: daemon platform 'windows/amd64' does not match target platforms: 'linux/amd64', 'linux/arm64' +``` + +##### New folder structure is used + +Finally, let's check some examples for the second scenario, when Buildpack Authors want to take advantage of the new +multi-architecture capabilities, let's use our original folder structure: + +```bash +. 
+├── buildpack.toml +├── linux +│ ├── amd64 +│ │ └── bin +│ │ ├── build +│ │ └── detect +│ └── arm64 +│ └── bin +│ ├── build +│ └── detect +└── windows + └── amd64 + └── windows@10.0.20348.1970 + └── bin + ├── build.bat + └── detect.bat +``` + +And a `buildpack.toml` with the following `targets` defined: + +```toml +[[targets]] +os = "linux" +arch = "amd64" + +[[targets]] +os = "linux" +arch = "arm64" + +[[targets]] +os = "windows" +arch = "amd64" + +[[targets.distros]] +name = "windows" +version = "10.0.20348.1970" + +# Stacks (deprecated) the buildpack will work with +[[stacks]] +id = "*" +``` + +When Buildpack Authors want to create a multi-arch images, they can execute the following command: +```bash +pack buildpack package --config ./package.toml --publish +Info: A multi-platform buildpack package will be created for targets: 'linux/amd64', 'linux/arm64', 'windows/amd64' +Successfully published package and saved to registry +``` +A fully multi-arch buildpack will be created automatically, because we have more than one target defined +in the `buildpack.toml` + +**Output**: In this case, three OCI images will be created and pushed into the registry, for each image the configuration file will be +created with the correct target: `os` and `architecture`, +an [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) will be created to combine them, +with a content similar to: + +```json + +{ + "manifests": [ + { + "digest": "sha256:b492494d8e0113c4ad3fe4528a4b5ff89faa5331f7d52c5c138196f69ce176a6", + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "platform": { + "architecture": "amd64", + "os": "linux" + }, + "size": 424 + }, + { + "digest": "sha256:2589fe6bcf90466564741ae0d8309d1323f33b6ec8a5d401a62d0b256bcc3c37", + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "platform": { + "architecture": "arm", + "os": "linux" + }, + "size": 424 + }, + { + "digest": 
"sha256:ed1a67bb47f3c35d782293229127ac1f8d64873a131186c49fe079dada0fa7e0", + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "platform": { + "architecture": "amd64", + "os": "windows", + "os.version": "10.0.20348.1970" + }, + "size": 424 + } + ], + "mediaType": "application/vnd.oci.image.index.v1+json", + "schemaVersion": 2 +} +``` + +If the Buildpack Author wants to create a single buildpack package they will use the `target` flag, similar to our previous +examples. + +## Composite Buildpack Package + +When packaging a composite buildpack we need a `package.toml` to declare the dependencies, this could be improved and there is an [issue](https://github.com/buildpacks/pack/issues/1082) for it +but today the `package.toml` is mandatory on `pack`. Also, it's important to remember that **we can't** use `targets` in the `buildpack.toml` when we also need to declare an `order` so this open +the question: + +**Where do we define targets for composite buildpacks?** +The natural answer will be `package.toml`, as it already defines the dependencies, it seems very straight forward to include this section for this particular case. 
The new schema will look like: + +```toml +[buildpack] +uri = "" + +[[targets]] +os = "" +arch = "" +variant = "" + +[[targets.distros]] +name = "" +version = "" + +[[dependencies]] +uri = "" + +# Deprecated +[platform] +os = "" +``` + +This information will help `pack` to determine a multi-arch composite buildpack is expected, but there is another +problem to solve, currently, the dependencies can be located in several places: + - OCI Registry + - Local file in the filesystem (.cnb) file + - Local folder in the filesystem + - A .tar.gz file in a remote S3 bucket accessible through HTTPS + +**How will pack find the correct artifact for each target?** + +For the OCI registry case, we'd expect Buildpack Authors to release multi-arch single buildpacks behind an +[image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) and pulling these dependencies +will be a natural process, this will be the **only valid** locator in the `dependencies.uri` in cases where a multi-arch +composite buildpack is expected to be built. + +## Builder + +Similar to how we did it for the `buildpack package`, let's summarize our current process to create a **Builder**: + +- We read the `builder.toml` and fetch the `build.image`, currently we don't specify the `platform`, **daemon** `os/arch` is being used. +- We create a **base builder** from the `build.image`. 
+- We read the `os` and `architecture` from the **base builder** + - Fetch the `run.image`, matching the `os/arch` with the values from the **base builder** + - Fetch the `lifecycle` image that matches the platform (Note: lifecycle is already publish behind an [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md)) + - We add `Buildpacks` and `Extensions` to the **base builder** trying to match the **base builder** `os/arch`, in case the `architecture` doesn't match, we fall back to match the **base builder** `os` +- More logic and finally the **Builder** image is created + +### To keep compatibility + +We propose: + +- When `stacks` is present in [builder.toml](https://buildpacks.io/docs/reference/config/builder-config/), throw a warning message indicating the field is deprecated and +it will be removed +- When `targets` is not present in [builder.toml](https://buildpacks.io/docs/reference/config/builder-config/), throw a warning messages indicating + a new `--target` flag is available +- Keep doing our current process to create a builder + +### To improve user experience + +We propose: + +- Add `targets` section to the `builder.toml` schema, this will keep consistency for end-users to understand how to +define multi-architecture. Adding more than one target to the `builder.toml` will be considered by `pack` as an +acknowledgement of the desire to generate [Builders](https://buildpacks.io/docs/concepts/components/builder/) with multiple platform targets. 
+ +The new schema will be +similar to: +```toml +# Buildpacks to include in builder, +# MUST point to an Image Index that matches targets +[[buildpacks]] +uri = "" + +[run] +# Runtime images +# MUST point to an Image Index that matches targets +[[run.images]] +image = "" + +[build] +# This image is used at build-time +# MUST point to an Image Index that matches targets +image = "" + +# Target platforms to support with the Builder +[[targets]] +os = "" +arch = "" +variant = "" +[[targets.distros]] +name = "" +version = "" +``` +- Add a new `--target` optional flag with format `[os][/arch][/variant]:[name@version]` to create a builder for a +particular target, this will help end-users to specify the platform for which they want to create single OCI artifact. + +### Examples + +Let's use some examples to explain the expected behavior in different use cases + +#### `Targets` are not present in `builder.toml` + +This is probably the case for most of the Buildpack Authors, for example +[Paketo](https://github.com/paketo-buildpacks/builder-jammy-tiny/blob/main/builder.toml), lets suppose a`buildpack.toml` +like: + +```toml +# Buildpacks to include in builder +[[buildpacks]] +uri = "" + +# Order used for detection +[[order]] +[[order.group]] +id = "" +version = "" + +[stack] +id = "io.buildpacks.samples.stacks.jammy" +build-image = "cnbs/sample-base-build:jammy" +run-image = "cnbs/sample-base-run:jammy" +``` +Or we include `build` and `run` images + +```toml +# Base images used to create the builder +[build] +image = "cnbs/sample-base-build:jammy" +[run] +[[run.images]] +image = "cnbs/sample-base-run:jammy" +``` + +In these cases, the expected output will be similar to: + +```bash +pack builder create --config ./builder.toml +Warning: "stack" has been deprecated, prefer "targets" instead: https://github.com/buildpacks/rfcs/blob/main/text/0096-remove-stacks-mixins.md +Warning: A new '--target' flag is available to set the target platform for the builder, using 'linux/amd64' 
as default +Successfully created builder image +Tip: Run pack build --builder to use this builder +``` +We expect the command to keep working as today, the builder image will be created but some **warning** messages will be +printed to help end-users to check for new updates, maybe link to a migration guide? + +Trying to use the new flags: + +```bash +pack builder create --config ./builder.toml --target linux/arm64 +Warning: "stack" has been deprecated, prefer "targets" instead: https://github.com/buildpacks/rfcs/blob/main/text/0096-remove-stacks-mixins.md +Warning: creating a builder for platform "linux/arm64" but "targets" is not defined, update your "builder.toml" to include "targets" +Successfully created builder image +Tip: Run pack build --builder to use this builder +``` + +**Output**: Pulling operations will be configured to use `linux/arm64` as target platform, +the OCI Image [configuration](https://github.com/opencontainers/image-spec/blob/main/config.md#properties) file will have: + +```json +{ + "architecture": "arm64", + "os": "linux" +} +``` + +What about multi-architecture builders? 
+ +Using `target` flag: + +```bash +pack builder create --config ./builder.toml \ + --target linux/amd64 \ + --target linux/arm64 \ + --publish +Successfully created builder image +Tip: Run pack build --builder to use this builder +``` + +**Output**: two OCI images will be created and pushed into the registry, for each image the configuration file will be +created with the correct target: `os` and `architecture`, an +[image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) will be created to combine them + +#### `Targets` are present in `builder.toml` + +Let's suppose a `builder.toml` similar to this one: + +```toml +[[buildpacks]] +id = "samples/java-maven" +version = "0.0.1" +uri = "" + +[[order]] +[[order.group]] +id = "samples/java-maven" +version = "0.0.1" + +[build] +image = "cnbs/sample-base-build:jammy" +[run] +[[run.images]] +image = "cnbs/sample-base-run:jammy" + +[[targets]] +os = "linux" +arch = "amd64" + +[[targets]] +os = "linux" +arch = "arm64" +``` + +Let's suppose we execute the command against a daemon running in a `linux/amd64` machine + +```bash +pack builder create --config ./builder.toml +Info: creating a builder for target "linux/amd64" +Successfully created builder image +Tip: Run pack build --builder to use this builder +``` + +**Output**: We keep our current behavior and detect the `os` and `architecture` from the daemon. Because there is `target` +that matches the daemon `os/arch` the builder is being built. + + + +What about multi-architecture builders? 
+
+```bash
+pack builder create --config ./builder.toml --publish
+Info: A multi-platform builder will be created for targets: 'linux/amd64', 'linux/arm64'
+Successfully created builder image
+Tip: Run pack build --builder to use this builder
+```
+
+Using `target` flag:
+
+```bash
+pack builder create --config ./builder.toml \
+  --target linux/amd64 \
+  --target linux/arm64 \
+  --publish
+Info: A multi-platform builder will be created for targets: 'linux/amd64', 'linux/arm64'
+Successfully created builder image
+Tip: Run pack build --builder to use this builder
+```
+
+**Output** In both cases, two OCI images will be created and pushed into the registry, for each image the configuration file will be
+created with the correct target platform: `os` and `architecture`,
+an [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) will be created to combine them
+
+
+# Migration
+[migration]: #migration
+
+1. Align with the Stack removal [plan](https://docs.google.com/document/d/1bExpr31U5R5yQ6fncpl5YcdosWVwYcXgkt12vE-lpvU/edit)
+2. Deprecate the `platform.os` field from [package.toml](https://buildpacks.io/docs/reference/config/package-config/)
+   - We don't want to break current behavior, but we do want the community to migrate to the new approach
+3. Update docs to explain the new functionality, blog posts or any other useful media to communicate the message
+4. Remove `platform.os` support on `pack`
+
+# Drawbacks
+[drawbacks]: #drawbacks
+
+- New complexity will be added into `pack`
+
+# Alternatives
+[alternatives]: #alternatives
+
+- Do nothing, Buildpack Authors can keep using other tools like `docker buildx imagetools create` or `crane` to update the `architecture` in their Manifest files or
+create [image indexes](https://github.com/opencontainers/image-spec/blob/master/image-index.md).
+- Do not deprecate `platform.os` field from [package.toml](https://buildpacks.io/docs/reference/config/package-config/) and add more fields to get the same result, instead of flags
+  - I didn't explore this idea
+
+# Prior Art
+[prior-art]: #prior-art
+
+- Stack Removal [RFC #096](https://github.com/buildpacks/rfcs/blob/jjbustamante/feature/multi-arch-phase-2/text/0096-remove-stacks-mixins.md)
+- This RFC is a continuation of the work started with the proposal to add commands to handle manifest list in pack, see the [RFC](https://github.com/buildpacks/rfcs/pull/283)
+- Paketo [RFC #288](https://github.com/paketo-buildpacks/rfcs/pull/288) to publish multi-arch buildpacks
+
+# Unresolved Questions
+[unresolved-questions]: #unresolved-questions
+
+- How would I add support for a new platform to an existing [image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md)?
+- What are the intermediate images for each target named/called?
+- What happens if I want to exclude some buildpack for a particular target?
+- What happens if I want to include the same file or folder for every image, do I have to copy them inside the {os}-{arch} folder?
+- Initially we proposed a shared file strategy but we decided to leave that complexity out of the scope of this RFC and can be
+revisited later if it is required
+
+# Spec. 
Changes (OPTIONAL)
+[spec-changes]: #spec-changes
+
+NA
+
+# History
+[history]: #history
+
+
diff --git a/text/0129-export-run-image-metadata.md b/text/0129-export-run-image-metadata.md
new file mode 100644
index 000000000..124b24088
--- /dev/null
+++ b/text/0129-export-run-image-metadata.md
@@ -0,0 +1,141 @@
+# Meta
+[meta]: #meta
+- Name: Export Run Image Metadata
+- Start Date: 2024-04-11
+- Author(s): joeybrown-sf
+- Status: Approved
+- RFC Pull Request: https://github.com/buildpacks/rfcs/pull/313
+- CNB Pull Request: (leave blank)
+- CNB Issue: https://github.com/buildpacks/lifecycle/issues/1372
+- Supersedes: N/A
+
+# Summary
+[summary]: #summary
+
+The `report.toml` file created by the lifecycle exporter and rebaser should include the following properties:
+- `run-image.image`
+- `run-image.mirrors`
+- `run-image.reference`
+- `run-image.top-layer`
+
+These values are not necessarily known prior to export or rebase, but they can be critical to a platform rebase process.
+
+# Motivation
+[motivation]: #motivation
+
+Platform operators may need a comprehensive understanding of images on their platform in order to make decisions about rebase and image publishing. Run image metadata is likely part of this comprehensive understanding for rebase. It is likely that this data may only be known after an image is created or rebased, and today it is only accessible via reading the image. Therefore, in order to access this metadata, platform operators must query the image.
+
+Querying the docker daemon or querying an image registry is suboptimal and we should make this data more accessible. It is suboptimal because it requires the platform to run an additional service to query the data it just published. If we make this data more accessible, we could potentially reduce image queries (registry calls) by a significant factor.
+ +Putting this data into `report.toml` is advantageous over other methods, especially when considering the kubernetes `terminationMessagePath` message [pattern](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#pod-v1-core). In this pattern, the content of `report.toml` can be used as a container's termination message, making this data easily accessible after an image is exported or rebased within a kubernetes container. + +# What it is +[what-it-is]: #what-it-is + +These values will be included when the lifecycle exporter/creator/rebaser binary writes `report.toml`. + +Here are two examples of `report.toml` content. (Other values are omitted for readability.) + +#### Image published to a registry: +``` +[image] +tags = ... +digest = ... +image-id = ... +manifest-size = ... +run-image.image = "run/name:foo" +run-image.reference = "index.docker.io/run/name@sha256:94f85561b0976bf1e2bef6b14de92299ebcd4c8148802cf9b217654651e4f416" +run-image.top-layer = "sha256:83ad2f0b091621ce19357e19d853c8be1b8f4d60d99c281fc2db75e0f56df42a" +run-image.mirrors = ["", ""] +``` + +#### Image exported to the docker daemon: +``` +[image] +tags = ... +digest = ... +image-id = ... +manifest-size = ... +run-image.image = "run/name:foo" +run-image.reference = "5b90f9c0e189" +run-image.top-layer = "sha256:83ad2f0b091621ce19357e19d853c8be1b8f4d60d99c281fc2db75e0f56df42a" +run-image.mirrors = ["", ""] +``` + +# How it Works +[how-it-works]: #how-it-works + +This metadata is readily available when `report.toml` is created, so it will be straight-forward to extend `report.toml`. + +# Migration +[migration]: #migration + +N/A + +This is an additive change to a metadata file and will be backwards compatible. + +# Drawbacks +[drawbacks]: #drawbacks + +This metadata is written to the [`lifecycle.metadata` label](https://github.com/buildpacks/spec/blob/main/platform.md#iobuildpackslifecyclemetadata-json) and it can be accessed by querying a docker daemon or registry. 
So we will be writing this data to two outputs. + +# Alternatives +[alternatives]: #alternatives + +1. Do nothing and continue to require platforms to retrieve this information via alternative means--either querying the docker daemon or registry. + - Rebase process may remain suboptimal for some platform providers. + +2. Write all the metadata labels to `report.toml`. + - This could break platform operators that are using the `terminationMessagePath` [pattern](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#pod-v1-core). Because some of the metadata includes unbounded arrays, we could explode this report beyond the max size of 4096 bytes. + +3. Write another file that contains this metadata (and potentially more metadata). + - If we consider this approach and take it to the logical conclusion, we should consider writing a sparse image as output. A sparse image would contain all the metadata available, and it would be in a well-known format that other image tools like `crane`, `skopeo`, or `pack` can read. + - Writing to another file is not as simple as writing to `report.toml`. + - It increases the complexity of lifecycle exporter & rebaser. + - Writing to another file would not give platform operators the advantage of the `terminationMessagePath` [pattern](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#pod-v1-core), because they are likely already reading `report.toml`. + +# Prior Art +[prior-art]: #prior-art + +These values are written to image labels. And they are among values that `pack inspect` returns. + +`report.toml` was introduced in [RFC 0040](/text/0040-export-report.md). That RFC includes some context for why `report.toml` exists. + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +N/A + +# Spec. 
Changes +[spec-changes]: #spec-changes + +The following would be appended to the [`report.toml` spec](https://github.com/buildpacks/spec/blob/main/platform.md#reporttoml-toml) (this section would be materially identical to the [`lifecycle.metadata` label](https://github.com/buildpacks/spec/blob/main/platform.md#iobuildpackslifecyclemetadata-json)): + +> runImage.topLayer MUST contain the uncompressed digest of the top layer of the run-image. +> +> runImage.reference MUST uniquely identify the run image. It MAY contain one of the following +> - An image ID (the digest of the uncompressed config blob) +> - A digest reference to a manifest stored in an OCI registry +> +> runImage.image and runImage.mirrors MUST be resolved from run.toml from the given + +# History +[history]: #history + + diff --git a/text/0130-oci-image-annotations.md b/text/0130-oci-image-annotations.md new file mode 100644 index 000000000..0e5e8f3c1 --- /dev/null +++ b/text/0130-oci-image-annotations.md @@ -0,0 +1,116 @@ +# Meta +[meta]: #meta +- Name: OCI Image Annotations on Buildpacks +- Start Date: 2024-06-26 +- Author(s): @candrews +- Status: Approved +- RFC Pull Request: [rfcs#314](https://github.com/buildpacks/rfcs/pull/314) +- CNB Pull Request: (leave blank) +- CNB Issue: https://github.com/buildpacks/rfcs/issues/318 +- Supersedes: (put "N/A" unless this replaces an existing RFC, then link to that RFC) + +# Summary +[summary]: #summary + +The `pack` tool should set OCI annotations on the OCI artifacts it produces providing users of these buildpacks with a consistent, standard mechanism for gathering information about the buildpack, including how to find its documentation and what version it is. The annotation values can be gathered from existing data sources (such as `buildpack.toml`) therefore not requiring any additional effort on the part of users of the `pack` tool. 
+
+# Definitions
+[definitions]: #definitions
+
+[Buildpacks](https://buildpacks.io/docs/for-app-developers/concepts/buildpack/) are [OCI images](https://github.com/opencontainers/image-spec/blob/v1.1.0/README.md). [Annotations](https://github.com/opencontainers/image-spec/blob/v1.1.0/annotations.md) are optional properties that can be applied to image manifests and descriptions providing a mechanism to communicate metadata. The [Pre-Defined Annotation Keys](https://github.com/opencontainers/image-spec/blob/v1.1.0/annotations.md#pre-defined-annotation-keys) are a standardized set of annotations that can be used to convey metadata in a consistent way between image authors and users.
+
+# Motivation
+[motivation]: #motivation
+
+Knowing the origin and other metadata for a buildpack (which is an OCI image) is very helpful. Some examples of such use cases include finding release notes, user manuals, bug reporting procedures, and license information. Currently, it can be difficult to find the source control repository of a buildpack as that information is not available in a standard way.
+
+The OCI Image Format Specification's Pre-Defined Annotation Keys provide a standardized way to discover additional information about an OCI image. Because these annotations are standardized and widely used, tools have come to use them. For example, [Snyk](https://snyk.io/blog/how-and-when-to-use-docker-labels-oci-container-annotations/) and [Renovate](https://github.com/renovatebot/renovate/blob/34.115.1/lib/modules/datasource/docker/readme.md) use these annotations.
+
+The outcome will be that users and tools will be able to gather more information about buildpacks, facilitating use cases such as gathering release notes and finding documentation.
+
+# What it is
+[what-it-is]: #what-it-is
+
+`pack buildpack package` should set the following OCI annotations on the images it produces:
+
+- `org.opencontainers.image.source` (when possible)
+- `org.opencontainers.image.revision` (when possible)
+- `org.opencontainers.image.title`
+- `org.opencontainers.image.version`
+- `org.opencontainers.image.url` (when possible)
+- `org.opencontainers.image.description` (when possible)
+
+The target personas are buildpack users, platform operators, and platform implementers. Any of those groups will be able to more easily understand the origin (source), version, and other information about the buildpack. This information can then be used manually or with the aid of tools to get release notes which aid these personas in making informed decisions.
+
+# How it Works
+[how-it-works]: #how-it-works
+
+When packaging the buildpack, the `pack` tool can get the values for the `org.opencontainers.image.source` and `org.opencontainers.image.revision` annotations from git. `org.opencontainers.image.source` is derived from the git origin and `org.opencontainers.image.revision` is the git commit hash.
+ +The other annotation values come from `buildpack.toml` mapped to OCI annotations as follows: + +- `name` -> `org.opencontainers.image.title` +- `version` -> `org.opencontainers.image.version` +- `homepage` (optional) -> `org.opencontainers.image.url` +- `description` (optional) -> `org.opencontainers.image.description` + +The following example values are from [Paketo Buildpack for Java 13.0.1](https://github.com/paketo-buildpacks/java/releases/tag/v13.0.1): + +- `org.opencontainers.image.source`: https://github.com/paketo-buildpacks/java +- `org.opencontainers.image.revision`: 09747b1df0a56aea74ce9b01af89df6feb1fc50a +- `org.opencontainers.image.title`: Paketo Buildpack for Java +- `org.opencontainers.image.version`: 13.0.1 +- `org.opencontainers.image.url`: https://paketo.io/docs/howto/java +- `org.opencontainers.image.description`: A Cloud Native Buildpack with an order definition suitable for Java applications + +# Migration +[migration]: #migration + +The `pack` tool would be modified to set the annotations. Because the tool _should_ set these annotations (not _must_ set), buildpacks created with earlier versions of the tool are still considered to be valid in accordance with the distribution specification. + +# Drawbacks +[drawbacks]: #drawbacks + +N/A + +# Alternatives +[alternatives]: #alternatives + +Instead of standardizing the use of these annotations across all buildpacks, each buildpack could add the annotations individually. However, that approach has significant consistency and maintainability concerns. Standardizing the annotations and implementing them consistently across all buildpacks minimizes risk and maximizes utility. If this approach is not done, users will continue to be unable to use tools to gather buildpack information, and gathering that information manually will continue to be difficult or impossible. + +# Prior Art +[prior-art]: #prior-art + +Many images are setting OCI image annotations with adoption continually on the rise. 
+ +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +N/A + +# Spec. Changes (OPTIONAL) +[spec-changes]: #spec-changes + +The [distribution spec](https://github.com/buildpacks/spec/blob/main/distribution.md) would be updated to document the OCI image annotations as covered in ["How it Works"](#how-it-works). + + +# History +[history]: #history + + \ No newline at end of file diff --git a/text/0131-build-observability.md b/text/0131-build-observability.md new file mode 100644 index 000000000..235ce5798 --- /dev/null +++ b/text/0131-build-observability.md @@ -0,0 +1,411 @@ +# Meta +[meta]: #meta +- Name: Buildpack Observability +- Start Date: 2022-10-05 +- Author(s): @joshwlewis +- Status: Approved +- RFC Pull Request: [rfcs#299](https://github.com/buildpacks/rfcs/pull/299) +- CNB Pull Request: (leave blank) +- CNB Issue: N/A +- Supersedes: (put "N/A" unless this replaces an existing RFC, then link to that RFC) + +# Summary +[summary]: #summary + +This RFC proposes leveraging [OpenTelemetry](https://opentelemetry.io/) to +grant platform operators and buildpack operators more insight into buildpack +performance and behavior. This RFC describes new opt-in functionality +for pack, the lifecycle, and the buildpack spec such that OpenTelemetry data may be +exported to the build file system. + +# Definitions +[definitions]: #definitions + +- [OpenTelemetry](https://opentelemetry.io/): A collection of APIs, SDKs, and tools that can be used it to instrument, generate, collect, and export telemetry data. +- [Traces](https://opentelemetry.io/docs/concepts/signals/traces/): Telemetry + category that describes the path of software execution. + + +# Motivation +[motivation]: #motivation + +Buildpack authors and platform operators desire insight into usage, error +scenarios, and performance of builds and buildpacks on their platform. 
The
+following questions are all important for these folks, but difficult to answer:
+
+- "Which buildpacks commonly fail to compile?"
+- "How often does a particular error scenario occur?"
+- "How long does each buildpack compile phase take?"
+- "How often is a certain buildpack used?"
+- "Which versions of Go are being installed?"
+- "How long does it take to download node_modules?"
+
+Instrumenting lifecycle and buildpacks with opt-in OpenTelemetry tracing will
+allow platform operators to better understand performance and behavior of their
+builds and buildpacks and as a result, provide better service and build
+experiences.
+
+To protect privacy and prevent unnecessary collection of data, this
+functionality shall be optional and anonymous.
+
+# What it is
+[what-it-is]: #what-it-is
+
+This RFC aims to provide a solution for two types of OpenTelemetry traces:
+
+1) Lifecycle tracing: Buildpack-agnostic trace data like which buildpacks were
+available, which buildpacks were detected, how long the detect, build, or
+export phase took, and so on. This telemetry data may be exported by lifecycle.
+2) Buildpack tracing: Telemetry data specific to a buildpack like how long it
+took to download a language binary, which language version was selected, and so
+on. This telemetry data may be exported by buildpacks.
+
+Though the sources and contents of the telemetry data differ, both types may
+be emitted to the build file system in OpenTelemetry's [File Exporter
+Format](https://opentelemetry.io/docs/specs/otel/protocol/file-exporter/).
+
+In this solution, each lifecycle phase would write a `.jsonl` file with
+tracing data for that phase. For example, `lifecycle detector --telemetry`
+would write to `/layers/tracing/lifecycle/detect.jsonl`. Additionally each
+buildpack may also write tracing data to its own `.jsonl` files (at
+`/layers/tracing/buildpacks/#{id}@#{version}-#{phase}.jsonl`).
+ +These `.jsonl` files may be read by platform operators for consumption, +transformation, enrichment, and/or export to an OpenTelemetry backend. Given +that builds may crash or fail at any point, these files must be written to +often and regularly to prevent data loss. + +Platform operators will likely want to view or analyze this data. These +telemetry files are in OTLP compatible format, so may be exported to one or +more OpenTelemetry backends like Honeycomb, Prometheus, and [many +others](https://opentelemetry.io/ecosystem/vendors/). + +Additionally, these traces may be correlated with traces in a platform +operator's system via context propagation. The `CNB_OTEL_TRACEPARENT` may be +provided by a platform to the build environment, such that generated traces +inherit `trace-id` and `parent-id` from platform systems. + +# How it Works +[how-it-works]: #how-it-works + +### Lifecycle telemetry files + +If `lifecycle` is provided the telemetry opt-in flag (such as `--telemetry`), +`lifecycle` phases (such as `detect`, `build`, `export`) may emit an +OpenTelemetry File Export with tracing data to a known location, such as +`/layers/tracing/lifecycle/detect.jsonl` with contents like this: + +```json +{ + "resourceSpans": [ + { + "resource": { + "attributes": [ + { + "key": "lifecycle.version", + "value": { + "stringValue": "0.17.1" + } + } + ] + }, + "scopeSpans": [ + { + "scope": {}, + "spans": [ + { + "traceId": "", + "spanId": "", + "parentSpanId": "", + "name": "buildpack-detect", + "startTimeUnixNano": "1581452772000000321", + "endTimeUnixNano": "1581452773000000789", + "droppedAttributesCount": 2, + "events": [ + { + "timeUnixNano": "1581452773000000123", + "name": "detect-pass" + } + ], + "attributes": [ + { + "key": "buildpack-id", + "value": { + "stringValue": "heroku/nodejs-engine" + } + } + ], + "droppedEventsCount": 1 + } + ] + } + ] + } + ] +} +``` + + +### Buildpack telemetry files + +During a buildpack's `detect` and/or `build` execution, a buildpack 
may emit +an OpenTelemetry File Export with tracing data to `/layers/tracing/buildpacks/#{id}@#{version}-#{phase}.jsonl` +with contents like this: + +```json +{ + "resourceSpans": [ + { + "resource": { + "attributes": [ + { + "key": "lifecycle.version", + "value": { + "stringValue": "0.17.1" + } + } + ] + }, + "scopeSpans": [ + { + "scope": {}, + "spans": [ + { + "traceId": "", + "spanId": "", + "parentSpanId": "", + "name": "buildpack-detect", + "startTimeUnixNano": "1581452772000000321", + "endTimeUnixNano": "1581452773000000789", + "droppedAttributesCount": 2, + "events": [ + { + "timeUnixNano": "1581452773000000123", + "name": "detect-pass" + } + ], + "attributes": [ + { + "key": "buildpack-id", + "value": { + "stringValue": "heroku/nodejs-engine" + } + } + ], + "droppedEventsCount": 1 + } + ] + } + ] + } + ] +} +``` + +### Location + +All tracing files should be written to `/layers/tracing/`. Lifecycle execution +traces should be written to `/layers/tracing/lifecycle/{phase}.jsonl`. +Buildpack traces may be written to +`/layers/tracing/buildpacks/{id}@{version}-{phase}.jsonl`. +Extension traces may be written to +`/layers/tracing/extensions/{id}@{version}-{phase}.jsonl`. + +A completed build with tracing might have a tracing file hierarchy like this: + +``` + +└── tracing + ├── buildpacks + │ ├── other-id@other-version-detect.jsonl + │ ├── some-id@some-version-build.jsonl + │ └── some-id@some-version-detect.jsonl + ├── extensions + │ ├── some-id@some-version-detect.jsonl + │ └── some-id@some-version-generate.jsonl + └── lifecycle + ├── analyze.jsonl + ├── build.jsonl + ├── detect.jsonl + ├── export.jsonl + ├── extend.jsonl + └── restore.jsonl +``` + +### Lifetime + +Telemetry files may be written at any point during the build, so that they +are persisted in cases of failures to detect, failures to build, process +terminations, or crashes. 
The `jsonl` format allows telemetry libraries to
+safely append additional json objects to the end of a telemetry file, so
+telemetry data can be flushed to the file frequently. Telemetry files should
+not be truncated or deleted so that telemetry processing by a platform can
+happen during or after a build. Telemetry files should not be included in the
+build result, as they are not relevant, and would likely negatively impact
+image size and reproducibility.
+
+### Access
+
+The telemetry files should be group readable so that they may be analyzed by
+the user and/or platform during and/or after the build. The telemetry files
+must also be group writable so that buildpacks and lifecycle can write to them,
+but buildpacks and the lifecycle shall read and write only their own files.
+
+### Context Propagation
+
+To allow correlation of lifecycle and buildpack traces to traces in platform
+operator's systems, `CNB_OTEL_TRACEPARENT` may be provided for `lifecycle` and
+buildpacks. The value of this env var should follow
+[W3C Trace Context specification for traceparent field values](https://www.w3.org/TR/trace-context/#traceparent-header-field-values).
+If provided, generated traces by lifecycle and buildpacks shall inherit the
+`trace-id` and `parent-id` provided therein.
+
+### Consumption
+
+This RFC leaves the consumption of telemetry files to the platform operator.
+Platform operators choosing to use these metrics may read them either during
+or after the build. This can be done using existing OpenTelemetry libraries.
+Platform operators may choose to optionally enrich or modify the tracing data
+as they see fit (with data like `instance_id` or `build_id`). Platform
+operators will likely want to export this data to an OpenTelemetry backend for
+persistence and analysis, and again, this may be done with existing
+OpenTelemetry libraries.
+
+### Viewing and Analyzing
+
+Once the lifecycle and buildpack traces are exported to an OpenTelemetry
+backend, platform operators should be able to (depending on the features of the
+backend):
+
+- View the complete trace for a build
+- View or query attributes attached to spans (e.g. `buildpack_id`,
+  `nodejs_version`)
+- View or query span durations
+- View or query error types and/or messages
+- and more
+
+### Intent
+
+The purpose and intent of these files is to provide anonymous build
+observability data for users and platform operators. These files shall not
+be used for other intents. For example:
+
+- These files shall not be used as an API, contract, or communication mechanism
+  between buildpacks.
+- These files shall not record any personally identifiable information (such
+  as usernames, email addresses, IP Addresses, etc.).
+- These files shall not record any potentially business sensitive information
+  (such as passwords, access keys, resulting image name and/or urls, source
+  code repository name, etc.).
+
+# Migration
+[migration]: #migration
+
+No migration necessary, this is net-new functionality with no backwards
+compatibility concerns.
+
+# Drawbacks
+[drawbacks]: #drawbacks
+
+### Privacy Concerns
+
+This RFC outlines functionality that could be perceived as user tracking. To
+help remediate those concerns, these are some factors to remember about this
+design:
+
+1) This functionality is opt-in. `lifecycle` and `pack` will not emit telemetry
+   data unless the `--telemetry` flag is used.
+2) This functionality emits telemetry data only to the build file system. For
+   `pack` users, the telemetry files are stored in docker volumes on the local
+   machine. Neither `pack` nor `lifecycle` will "phone home" with telemetry data.
+3) Neither `pack` nor `lifecycle` collect personally identifiable information
+   (no emails, usernames, passwords, IP addresses, etc.).
+
+### File Export Format Status
+
+While the [File Exporter
+Format](https://opentelemetry.io/docs/specs/otel/protocol/file-exporter/) is
+an official format, and matches the OTLP format nearly exactly (and thus seems
+unlikely to change), it is listed as experimental status.
+
+
+### Additional restricted layer name
+
+This RFC introduces `/layers/tracing/`. This means that buildpack authors will
+be unable to use this directory as a layer name, and lifecycle will need to
+prevent usage of this directory.
+
+# Alternatives
+[alternatives]: #alternatives
+
+### OpenTelemetry Metrics
+
+[Metrics](https://opentelemetry.io/docs/concepts/signals/metrics/) are another
+category of telemetry data that could be used to answer questions about
+build and buildpack behavior and performance. However, metrics are intended to
+provide statistical information in aggregate. Since `lifecycle` and `pack`
+only run one build at a time, there is no way to aggregate information about
+multiple builds in `pack` or `lifecycle`.
+
+### OTLP
+
+The [OpenTelemetryProtocol](https://opentelemetry.io/docs/specs/otlp/) is a
+network delivery protocol for OpenTelemetry data. Instead of emitting files as
+this RFC describes, lifecycle and buildpacks could instead connect to an
+OpenTelemetry collector provided by the platform operator. This pattern is
+well supported and well known.
+
+However, there are drawbacks:
+
+- In local `pack build` scenarios, it's unlikely that users would have an
+  OpenTelemetry collector running. This RFC solution does not require a
+  collector.
+- lifecycle and buildpacks would need to know where the OpenTelemetry collector
+  is and how to authenticate with it. Lifecycle and buildpacks that wish to
+  emit telemetry may not want to deal with the mountain of configuration to
+  support various collectors.
+- Platform operators may have complex network topology that may make supporting
+  this feature challenging (e.g. 
a firewall between lifecycle and the collector
+  may still be perceived as a lifecycle malfunction).
+
+There is an [RFC for this alternative](https://github.com/buildpacks/rfcs/pull/300).
+
+# Prior Art
+[prior-art]: #prior-art
+
+
+- [Feature Request](https://github.com/buildpacks/lifecycle/issues/1208)
+- [Slack
+  Discussion](https://cloud-native.slack.com/archives/C033DV8D9FB/p1695144574408979)
+Discuss prior art, both the good and bad.
+
+# Unresolved Questions
+[unresolved-questions]: #unresolved-questions
+
+
+# Spec. Changes (OPTIONAL)
+[spec-changes]: #spec-changes
+
+Buildpack tracing file locations and format should be added to the [buildpack
+specification](https://github.com/buildpacks/spec/blob/main/buildpack.md#build).
+
+# History
+[history]: #history
+
+
diff --git a/text/0132-implementing-pack-detect.md b/text/0132-implementing-pack-detect.md
new file mode 100644
index 000000000..085539aa1
--- /dev/null
+++ b/text/0132-implementing-pack-detect.md
@@ -0,0 +1,138 @@
+# Meta
+
+[meta]: #meta
+
+- Name: Implementing pack detect command
+- Start Date: 2024-02-15
+- Author(s): @rashadism
+- Status: Draft
+- RFC Pull Request: (leave blank)
+- CNB Pull Request: (leave blank)
+- CNB Issue: (leave blank)
+- Supersedes: N/A
+
+# Summary
+
+[summary]: #summary
+
+The `pack execute` command is introduced to the Cloud Native Buildpacks ecosystem, providing a way to run only selected phases of the buildpack lifecycle. For the moment, only `detect` will be implemented, which will be invoked through `pack execute detect`. This feature enhances the developer experience by allowing them to quickly determine which buildpacks are relevant for their application without progressing through the entire build process. This was partially discussed in [issue #681](https://github.com/buildpacks/pack/issues/681) but the issue was about implementing a `dry-run` flag. 
After further discussion with @jjbustamante, we decided to go forward with this as a new `pack detect` command rather than a flag, and after further review it will be implemented as `pack execute detect`.
The reason to run the analyze binary is to get information about the run image that may impact the outcome of detect via `CNB_TARGET_*` environment variables.
+ +# Prior Art + +[prior-art]: #prior-art + +This has been discussed in Issue #681 before, and looked like it was a long awaited feature and currently a few workarounds are being used to get this functionality. + +# Unresolved Questions + +[unresolved-questions]: #unresolved-questions + +Fill after initial discussion + +# Spec. Changes (OPTIONAL) + +[spec-changes]: #spec-changes + +Since this is a new command, the functionality of this command will have to be amended to the spec / docs. + +# History + +[history]: #history + + diff --git a/text/0133-remove-windows-containers-support.md b/text/0133-remove-windows-containers-support.md new file mode 100644 index 000000000..efeccfd8b --- /dev/null +++ b/text/0133-remove-windows-containers-support.md @@ -0,0 +1,97 @@ +# Meta +[meta]: #meta +- Name: Remove Windows Containers Support +- Start Date: 2024-04-11 +- Author(s): aidan.delaney +- Status: Approved +- RFC Pull Request: [rfcs#311](https://github.com/buildpacks/rfcs/pull/311) +- CNB Pull Request: (leave blank) +- CNB Issue: (leave blank) +- Supersedes: "N/A" + +# Summary +[summary]: #summary + +Retain Windows Containers support in the buildpacks specification, but remove Windows Containers support from `lifecycle`, `pack` CLI, and documentation. + +# Definitions +[definitions]: #definitions + +Make a list of the definitions that may be useful for those reviewing. Include phrases and words that buildpack authors or other interested parties may not be familiar with. + +* [Windows Containers](https://learn.microsoft.com/en-us/virtualization/windowscontainers/about/): Containers derived from Windows Server Containers. For example: `mcr.microsoft.com/dotnet/framework/sdk:4.8-windowsservercore-ltsc2022` + +# Motivation +[motivation]: #motivation + +- There are no known users of Windows Container support within the buildpacks ecosystem. +- Supporting Windows Containers is straightforward at a specification level. 
However, the CNB project has neither the time nor the expertise to continue to support Windows Containers in our tooling. +- Removing Windows Container support from CNB tooling will speed up development, simplify testing and impact no existing users. +- Establish platform parity between `pack` and `kpack`. `kpack` has never supported building Windows Containers. + +# What it is +[what-it-is]: #what-it-is + +- We want to remove Windows Containers related documentation, but clearly state that the specification is platform neutral. +- Remove Windows Containers support from `pack`, but retain the Windows platform release of the `pack` CLI. + * this ensures `pack` users can still invoke `pack` on a Command or Power Shell console +- Remove Windows Containers support from `lifecycle`. Lifecycle can remove the Windows platform release as it will then be guaranteed to run on a Linux-based container. + +# How it Works +[how-it-works]: #how-it-works + + +# Migration +[migration]: #migration + +Teams that require Windows Container support will have to + +* continue to use the last release of `pack` that supports Windows Containers, +* fork `pack` and `lifecycle` to maintain Windows Containers support, or +* adopt alternative tooling that supports Windows Containers. + +# Drawbacks +[drawbacks]: #drawbacks + +Windows Containers support is well tested within `pack`. Removing support for Windows Containers removes a unique selling point of the `pack` CLI and `lifecycle` + +# Alternatives +[alternatives]: #alternatives + +- We have discussed platform support with `pack` users. There are no `pack` users who want to maintain Windows Container support. Should engineering effort become available to help with development and maintenance, then this proposal can be dropped. 
+ +# Prior Art +[prior-art]: #prior-art + +N/A + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +- We have made outreach efforts to CNB users to survey requirements of Windows Container support. We have found no users. The number of CNB users that require Windows Container support is still an open question. + +# Spec. Changes (OPTIONAL) +[spec-changes]: #spec-changes + +This proposal requires no specification changes. + +# History +[history]: #history + + diff --git a/text/0134-execution-environments.md b/text/0134-execution-environments.md new file mode 100644 index 000000000..cf10b2491 --- /dev/null +++ b/text/0134-execution-environments.md @@ -0,0 +1,260 @@ +# Meta +[meta]: #meta +- Name: Execution Environments +- Start Date: 2023-01-11 +- Author(s): @hone +- Status: Approved +- RFC Pull Request: [rfcs#274](https://github.com/buildpacks/rfcs/pull/274) +- CNB Pull Request: (leave blank) +- CNB Issue: [rfcs#327](https://github.com/buildpacks/rfcs/issues/327) +- Supersedes: N/A + +# Summary +[summary]: #summary + +Add buildpack support for multiple execution environments, with better support for testing as the first use case. + +# Definitions +[definitions]: #definitions + +- Execution Environment - The target environment the OCI image is expected to be executed in, i.e. production, test, development. + +# Motivation +[motivation]: #motivation + +Buildpacks are mainly being used to build images for production environments, but this is only one piece of the software development process. Without test environment support for buildpacks, users currently have to implement alternatives for testing their source code. Buildpacks are well positioned to help bridge that gap, as building production and test environments often share many of the same broad strokes. + +# What it is +[what-it-is]: #what-it-is + +## Test Environments + +One of the goals in this design is to minimize the changes needed. 
The Buildpack lifecycle is already well suited to produce environments: + +``` +App Source -> OCI Image (Production Environment) -> Execute (Launch Process) +``` + +To create a test environment, it can follow the same flow: + +``` +App Source -> OCI Image (Test Environment) -> Execute (Test Process) +``` + +### Division of Responsibility +With the test OCI Image, a platform can execute the tests in the pipeline as they see fit. This means a bulk of the responsibilities are platform concerns: + +- Set which environment to build for +- Decide which buildpacks to execute +- How to execute the tests +- What is the test result format like [TAP](https://en.wikipedia.org/wiki/Test_Anything_Protocol)? +- How to process the test results +- What to do with the results + +This narrows the scope of the Buildpack responsibilities to what it's already good at: + +- Produce Test Environment +- How to launch the process + - launch.toml with test process marked as default (recommendation) + +## Setting the Execution Enviroment for Build + Buildpack +A platform will set the `CNB_EXEC_ENV` env var to the desired execution environment. Buildpacks can then read this env var to branch or switch on logic needed for the execution environment. + +In addition, Builder Authors, Buildpack Authors, and App Developers will be able to configure various options for specific execution environments using the `exec-env` key. + +## Development Environments +The specifics of creating development environments are out of scope of this RFC, but it's not hard to extrapolate how the proposed changes can assist in creating Buildpacks for development environments. + +# How it Works +[how-it-works]: #how-it-works + +## `exec-env` key in TOML + +In order to support additional execution environments, an optional `exec-env` key will be added to various TOML tables in the project. This will be an array that takes string values. An individual element can be any string with `*` having special meaning. 
Similar to the ["any stack RFC"](https://github.com/buildpacks/rfcs/blob/main/text/0056-any-stack-buildpacks.md), `*` will apply to all execution environments. `["*"]` will be the default if not specified. This should make the key backwards compatible. When `exec-env` is not empty and does not include `*`, the table settings will only be applied to the specified execution environments. + +### Project Descriptor - `project.toml` (App Developers) + +An app developer may have execution environment specific configuration, like only using a metrics agent in production, or a headless user agent in test. In order to facilitate these needs, the project descriptor will be extended by adding `exec-env` to the following tables: + +* `[[io.buildpacks.group]]` +* `[[io.buildpacks.pre.group]]` +* `[[io.buildpacks.post.group.env]]` +* `[[io.buildpacks.build.env]]` + +An example would look like this: + +```TOML +[_] +schema-version = "0.3" + +[[io.buildpacks.group]] +id = "buildpacks/ruby" +version = "latest" + +[[io.buildpacks.group]] +id = "buildpacks/nodejs" +version = "latest" +exec-env = ["production", "test"] + +[[io.buildpacks.group]] +id = "buildpacks/metrics-agent" +version = "latest" +exec-env = ["production"] + +[[io.buildpacks.group]] +id = "buildpacks/headless-chrome" +version = "latest" +exec-env = ["test"] + +[[io.buildpacks.post.group]] +id = "buildpacks/procfile" +version = "latest" + +[[io.buildpacks.build.env]] +name = "RAILS_ENV" +value = "production" +exec-env = ["production"] + +[[io.buildpacks.build.env]] +name = "RAILS_ENV" +value = "test" +exec-env = ["test"] + +[[io.buildpacks.build.env]] +name = "PARALLEL_WORKERS" +value = "4" +exec-env = ["production"] +``` + +### `builder.toml` (Builder Authors) + +The `exec-env` key will be added to the `[[order.group]]` and `[[build.env]]` arrays of tables. 
+ +### `buildpack.toml` (Buildpack Authors) + +As a piece of metadata, a buildpack should list the execution environments it supports in the `[[buildpack.exec-env]]` array. This can be used by the buildpack registry in the future. + +For composite buildpacks, `exec-env` will be added to is `[[buildpack.order.group]]`. + +### `launch.toml` (Buildpack Authors) + +Not all process types make sense for every execution environment. In order to help hint to the platform the intention how a process should be used, the `exec-env` key will be added to the `[[processes]]` table. + +### `metadata.toml` (Platform Operators) + +On the platform side, the `exec-env` key will be added to `metadata.toml` in the `[[processes]]` table to mirror `launch.toml`. This will help platforms decide which processes are desired for each execution environment. `lifecycle` will list all processes and will not exclude any based on the execution environment Instead, a platform can use the available information to make a decision. + +## `CNB_EXEC_ENV` Environment Variable + +This value is a string and must abide by similar rules we use for IDs: + +* MUST only contain numbers, letters, and the characters `.`, and `-`. + +The spec will reserve the following values to help standardize execution environments: + +* production +* test +* development + +In addition, the `/` character is reserved in case we need to introduce namespacing in the future. + +### Buildpack API + +A buildpack author will be able to determine the execution environment their buildpack is expected to build for by reading the `CNB_EXEC_ENV` environment variable. If this value is not set, a Buildpack Author can assume it's set to `production`. This will be provided for both `bin/detect` and `bin/build`. + +This would let a buildpack author do different things based on the execution environment. For example, it's common for a production build to exclude test dependencies or files that a test environment would include. 
For compiled languages, a production build might even remove the source code and just leave the compiled binary with optimizations. In a test environment, a build may include debug symbols, be compiled without optimizations, and leave the source code untouched. + +### Platform API + +It will be up to the platform to set the environment variable `CNB_EXEC_ENV`. If this value is set, `lifecycle` MUST NOT override this value. If the value is not set, `lifecycle` will set it to `production`. + +During the export phase, `lifecycle` will set the `io.buildpacks.exec-env` label with the value from the `CNB_EXEC_ENV` environment variable. This will make it easier for anyone examining the OCI image to determine the execution environment. + +# Migration +[migration]: #migration + +In order to allow the Buildpack API and Platform API to be updated independently, both APIs will have a fallback to the `production` value. For `1.0`, it would be nice to drop this behavior and assume it will always be set. + +This also touches Project Descriptor, but since the platform controls the Platform API and the Project Descriptor Extension API it shouldn't cause any issues. + +# Drawbacks +[drawbacks]: #drawbacks + +While the design tries to stay simple, it still adds new additions to the spec. + +* env var to Buildpack API +* env var to Platform API +* field to Project Descriptor, `builder.toml`, and `buildpack.toml`. + +# Alternatives +[alternatives]: #alternatives + +## Separate Execution Environment Project Descriptor files + +When using `Dockerfile`, it's common to create a separate one for other execution environments like `Dockerfile.test`. This makes the file clean and easy to read, but comes at the cost of duplicating setup/code. + +With Project Descriptor, this would remove the need for the `exec-env` key at the cost of replicating buildpack groups. It does bring a big benefit by easily unlocking the `builder` key in Project Descriptor to be specific to an execution environment. 
This would apply to any fields (not tables) that exist at the `[io.buildpacks]` level like `include`/`exclude`. There are some other options if this is desired, where a new table could be created for hoisting fields into that would allow us to add the `exec-env` field. + +# Prior Art +[prior-art]: #prior-art + +## [Heroku Testpack API](https://devcenter.heroku.com/articles/testpack-api) + +As part of the classic buildpack design, Heroku extended the API to include support for tests. It added a `bin/test-compile` phase, which is similar to the `bin/compile` phase, but specific for setting things up for test. `bin/test` was called for executing tests. This design lets buildpack authors write codepaths and logic optimized specifically for executing tests based on the source code. For example, [this Ruby buildpack](https://github.com/heroku/heroku-buildpack-ruby/blob/fb393cb46e23ab809e21daeef2a97cbd5f04a370/bin/support/ruby_test#L62-L76) will run `bin/rspec` if it detects `rspec`, `bin/rails test` when using rails, or default to `rake test`. + +There were some flaws in this design. Though it's clean to separate production and test code paths, they end up sharing a lot of code. Many of the bash based Heroku buildpacks would just [call `bin/compile`](https://github.com/heroku/heroku-buildpack-nodejs/blob/main/bin/test-compile#L24) with different parameters/env vars. + +## [GOOGLE_DEVMODE](https://cloud.google.com/docs/buildpacks/service-specific-configs#google_devmode) + +These are specific to the Google Cloud Buildpacks for setting a development code path to work with skaffold. + +## [Develop API](https://github.com/buildpacks/spec/pull/71) + +The original Cloud Native Buildpacks spec included a Develop API, but it was never implemented. + +# Unresolved Questions +[unresolved-questions]: #unresolved-questions + +- "env" is overloaded as a word since we also use it for environment variables. Is there a better word here? +While "env" is overloaded, it matches the intent. 
+- Should there be builders that are specific to an execution environment? What about `include` or `exclude`? +No reason right now to restrict builders to an execution environment. +- Should the execution environments be an enum or flexible as a string? + - enums will help encourage standardization across buildpacks and platforms. + - strings can help account for use cases we haven't thought of yet. +We're going to opt for a string to be flexible. +- Should buildpacks be allowed specify allowlist execution environments? +We're currently optimizing for app developer flexibility to use the buildpacks as they see fit. +- What changes are needed in the buildpack registry? +The buildpack registry will need to expand the index to support the `buildpack.toml`'s `exec-env` field. +- Does `build.env` need to support execution environments in `builder.toml`? +- Should the reserved exec env strings be namespaced? +We're going to reserve the `/` character for namespacing if needed in the future. +- Instead of creating a new label, should we stash it into a JSON label? +The current JSON blobs are exports of lifecycle files, so we're going to create a new one for now. + +# Spec. Changes (OPTIONAL) +[spec-changes]: #spec-changes + +See ["How it Works"](#how-it-works). + +# History +[history]: #history + +