diff --git a/Makefile b/Makefile index 9d11cbc7cf8..c4de5712b13 100644 --- a/Makefile +++ b/Makefile @@ -384,7 +384,14 @@ install-tools: go install $(GO_MOD_FLAGS) github.com/golang/mock/mockgen go install $(GO_MOD_FLAGS) golang.org/x/lint/golint go install $(GO_MOD_FLAGS) github.com/golangci/golangci-lint/cmd/golangci-lint + go install $(GO_MOD_FLAGS) sigs.k8s.io/controller-runtime/tools/setup-envtest .PHONY: coverage coverage: hack/codecov.sh + +.PHONY: setup-envtest +# Downloads envtest binaries and prints the required export KUBEBUILDER_ASSETS=... command. +# Run the printed export command (or add it to your shell rc) before running benchmarks. +setup-envtest: + hack/setup-envtest.sh diff --git a/go.mod b/go.mod index ff590a97a0d..4a149ee823e 100644 --- a/go.mod +++ b/go.mod @@ -74,6 +74,7 @@ require ( k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 sigs.k8s.io/cluster-api-provider-azure v1.21.1-0.20250929163617-2c4eaa611a39 sigs.k8s.io/controller-runtime v0.22.3 + sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240927101401-4381fa0aeee4 sigs.k8s.io/controller-tools v0.19.0 sigs.k8s.io/yaml v1.6.0 ) diff --git a/hack/benchmark-comparison.sh b/hack/benchmark-comparison.sh new file mode 100755 index 00000000000..fce923fb74c --- /dev/null +++ b/hack/benchmark-comparison.sh @@ -0,0 +1,127 @@ +#!/bin/bash +# Compare benchmark performance before and after working changes. +# +# Uses git worktrees for safe comparison without touching your working tree. +# Compares the current working tree against a base commit (default: HEAD~1). 
+# +# Usage: +# ./hack/benchmark-comparison.sh [base-commit] +# +# Examples: +# # Compare current working tree vs parent commit +# ./hack/benchmark-comparison.sh +# +# # Compare current working tree vs main branch +# ./hack/benchmark-comparison.sh main +# +# # Compare with custom benchmark settings +# BENCHTIME=10x COUNT=8 ./hack/benchmark-comparison.sh +# +# # Benchmark specific packages +# BENCH_PKGS="./pkg/resource/" ./hack/benchmark-comparison.sh + +set -eo pipefail + +# Configurable benchmark parameters +BENCHTIME=${BENCHTIME:-5x} +COUNT=${COUNT:-6} +BENCH_PATTERN=${BENCH_PATTERN:-'^Benchmark'} +BENCH_PKGS=${BENCH_PKGS:-./test/benchmark/} +BASE_COMMIT=${1:-HEAD~1} + +# Ensure we're in a git repository +if ! git rev-parse --git-dir > /dev/null 2>&1; then + echo "Error: Not in a git repository" + exit 1 +fi + +# Ensure KUBEBUILDER_ASSETS is set +if [ -z "$KUBEBUILDER_ASSETS" ]; then + echo "Error: KUBEBUILDER_ASSETS environment variable not set" + echo "" + echo "Run: make setup-envtest" + echo "Then: export KUBEBUILDER_ASSETS=\"\$(hack/setup-envtest.sh | tail -1 | cut -d'=' -f2 | tr -d '\"')\"" + exit 1 +fi + +# Verify base commit exists +if ! git rev-parse "$BASE_COMMIT" > /dev/null 2>&1; then + echo "Error: Base commit '$BASE_COMMIT' not found" + exit 1 +fi + +TEMP_DIR=$(mktemp -d) +WORKTREE_DIR=$(mktemp -d -t hive-benchmark-worktree.XXXXXX) + +cleanup() { + if [ -d "$WORKTREE_DIR" ]; then + echo "" + echo "Cleaning up worktree..." 
+ git worktree remove "$WORKTREE_DIR" --force 2>/dev/null || true + rm -rf "$WORKTREE_DIR" 2>/dev/null || true + fi +} +trap cleanup EXIT + +echo "===========================================================" +echo "Benchmark Comparison" +echo "===========================================================" +echo "Base commit: $BASE_COMMIT ($(git rev-parse --short "$BASE_COMMIT"))" +echo "Working tree: $(git rev-parse --short HEAD)$(git diff --quiet && git diff --cached --quiet || echo ' (dirty)')" +echo "Benchmark time: ${BENCHTIME}" +echo "Run count: ${COUNT}" +echo "Pattern: ${BENCH_PATTERN}" +echo "Packages: ${BENCH_PKGS}" +echo "Results dir: ${TEMP_DIR}" +echo "===========================================================" +echo "" + +# Run benchmarks in the current working tree +echo "-> Running benchmarks in current working tree..." +go test -bench="${BENCH_PATTERN}" -benchmem -benchtime="${BENCHTIME}" -count="${COUNT}" ${BENCH_PKGS} 2>&1 | tee "${TEMP_DIR}/new.txt" + +# Create worktree at base commit +echo "" +echo "-> Creating worktree at ${BASE_COMMIT}..." +git worktree add --detach "$WORKTREE_DIR" "$BASE_COMMIT" --quiet + +# Run benchmarks at base commit (in worktree) +echo "-> Running benchmarks at base commit..." +(cd "$WORKTREE_DIR" && \ + export KUBEBUILDER_ASSETS="$KUBEBUILDER_ASSETS" && \ + go test -bench="${BENCH_PATTERN}" -benchmem -benchtime="${BENCHTIME}" -count="${COUNT}" ${BENCH_PKGS}) 2>&1 | tee "${TEMP_DIR}/old.txt" + +# Cleanup worktree early +echo "" +git worktree remove "$WORKTREE_DIR" --force +rm -rf "$WORKTREE_DIR" + +# Compare results +echo "" +echo "===========================================================" +echo "BENCHMARK COMPARISON" +echo "===========================================================" +echo "" + +if ! command -v benchstat &> /dev/null; then + echo "Warning: benchstat not installed. 
Install with:" + echo " go install golang.org/x/perf/cmd/benchstat@latest" + echo "" + echo "Raw results saved to:" + echo " Old: ${TEMP_DIR}/old.txt" + echo " New: ${TEMP_DIR}/new.txt" +else + benchstat "${TEMP_DIR}/old.txt" "${TEMP_DIR}/new.txt" +fi + +echo "" +echo "===========================================================" +echo "Results saved to: ${TEMP_DIR}" +echo " Base ($BASE_COMMIT): ${TEMP_DIR}/old.txt" +echo " Working tree: ${TEMP_DIR}/new.txt" +echo "" +if command -v benchstat &> /dev/null; then + echo "To view again:" + echo " benchstat ${TEMP_DIR}/old.txt ${TEMP_DIR}/new.txt" +fi +echo "===========================================================" diff --git a/hack/setup-envtest.sh b/hack/setup-envtest.sh new file mode 100755 index 00000000000..fd673ff3fbb --- /dev/null +++ b/hack/setup-envtest.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +if ! command -v setup-envtest &> /dev/null; then + echo "run 'make install-tools' to install setup-envtest" + exit 1 +fi + +K8S_VERSION=${K8S_VERSION:-$(go list -m -f "{{ .Version }}" k8s.io/api | awk -F'[v.]' '{printf "1.%d", $3}')} + +echo "using k8s version ${K8S_VERSION}" + +echo "Fetching binaries (on first run this may take some time)" +ENVTEST_PATH=$(setup-envtest use "$K8S_VERSION" -p path 2>&1) + +echo +echo "Run this now or add it to your shell rc:" +echo " export KUBEBUILDER_ASSETS=\"$ENVTEST_PATH\"" diff --git a/pkg/dependencymagnet/doc.go b/pkg/dependencymagnet/doc.go index 2f5e787b441..38c468d7ad7 100644 --- a/pkg/dependencymagnet/doc.go +++ b/pkg/dependencymagnet/doc.go @@ -25,4 +25,7 @@ import ( // TODO: remove this patch with kube bump to 1.34, which will carry the fix (https://github.com/kubernetes/kubernetes/pull/132378) // Work around for https://github.com/kubernetes/kubernetes/issues/132377 _ "k8s.io/code-generator/cmd/validation-gen" + + // Used for envtest + _ "sigs.k8s.io/controller-runtime/tools/setup-envtest" ) diff --git 
a/pkg/remoteclient/benchmark_test.go b/pkg/remoteclient/benchmark_test.go new file mode 100644 index 00000000000..5e47b72d2c4 --- /dev/null +++ b/pkg/remoteclient/benchmark_test.go @@ -0,0 +1,14 @@ +package remoteclient_test + +import ( + "os" + "testing" + + "github.com/openshift/hive/test/benchutil" +) + +func TestMain(m *testing.M) { + code := m.Run() + benchutil.StopEnvTest() + os.Exit(code) +} diff --git a/pkg/remoteclient/kubeconfig.go b/pkg/remoteclient/kubeconfig.go index bfc2ef9d925..fbec5b3ea17 100644 --- a/pkg/remoteclient/kubeconfig.go +++ b/pkg/remoteclient/kubeconfig.go @@ -13,11 +13,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func NewBuilderFromKubeconfig(c client.Client, secret *corev1.Secret, controllerName hivev1.ControllerName) Builder { +func NewBuilderFromKubeconfig(c client.Client, secret *corev1.Secret, controllerName hivev1.ControllerName, opts ...BuilderOption) Builder { + var bo builderOptions + for _, o := range opts { + o(&bo) + } return &kubeconfigBuilder{ c: c, secret: secret, fieldManager: "hive3-" + string(controllerName), + opts: bo, } } @@ -25,6 +30,7 @@ type kubeconfigBuilder struct { c client.Client secret *corev1.Secret fieldManager string + opts builderOptions } // Build is also responsible for verifying reachability of client @@ -89,5 +95,12 @@ func (b *kubeconfigBuilder) UseSecondaryAPIURL() Builder { } func (b *kubeconfigBuilder) RESTConfig() (*rest.Config, error) { - return utils.RestConfigFromSecret(b.secret, false) + cfg, err := utils.RestConfigFromSecret(b.secret, false) + if err != nil { + return nil, err + } + if b.opts.transportWrapper != nil { + cfg.Wrap(b.opts.transportWrapper) + } + return cfg, nil } diff --git a/pkg/remoteclient/remoteclient.go b/pkg/remoteclient/remoteclient.go index b98843dc6eb..9ae0278ee9e 100644 --- a/pkg/remoteclient/remoteclient.go +++ b/pkg/remoteclient/remoteclient.go @@ -54,7 +54,11 @@ type Builder interface { // The controllerName is needed for metrics. 
// If the ClusterDeployment carries the fake cluster annotation, a fake client will be returned populated with // runtime.Objects we need to query for in all our controllers. -func NewBuilder(c client.Client, cd *hivev1.ClusterDeployment, controllerName hivev1.ControllerName) Builder { +func NewBuilder(c client.Client, cd *hivev1.ClusterDeployment, controllerName hivev1.ControllerName, opts ...BuilderOption) Builder { + var bo builderOptions + for _, o := range opts { + o(&bo) + } if utils.IsFakeCluster(cd) { clusterVersion := "" if cd.Status.InstallVersion != nil { @@ -70,6 +74,7 @@ func NewBuilder(c client.Client, cd *hivev1.ClusterDeployment, controllerName hi cd: cd, controllerName: controllerName, urlToUse: activeURL, + opts: bo, } } @@ -185,11 +190,29 @@ func SetUnreachableCondition(cd *hivev1.ClusterDeployment, connectionError error return } +// BuilderOption configures optional behavior on a Builder. +type BuilderOption func(*builderOptions) + +type builderOptions struct { + transportWrapper func(http.RoundTripper) http.RoundTripper +} + +// WithTransportWrapper adds a transport wrapper that will be applied to the +// REST config returned by RESTConfig(). The wrapper is applied after the +// controller metrics transport, so it sits on the outermost layer and +// observes all HTTP round trips. 
+func WithTransportWrapper(wrapper func(http.RoundTripper) http.RoundTripper) BuilderOption { + return func(o *builderOptions) { + o.transportWrapper = wrapper + } +} + type builder struct { c client.Client cd *hivev1.ClusterDeployment controllerName hivev1.ControllerName urlToUse int + opts builderOptions } const ( @@ -267,6 +290,10 @@ func (b *builder) RESTConfig() (*rest.Config, error) { utils.AddControllerMetricsTransportWrapper(cfg, b.controllerName, true) + if b.opts.transportWrapper != nil { + cfg.Wrap(b.opts.transportWrapper) + } + if override := b.cd.Spec.ControlPlaneConfig.APIURLOverride; override != "" { if b.urlToUse == primaryURL || (b.urlToUse == activeURL && IsPrimaryURLActive(b.cd)) { diff --git a/pkg/remoteclient/remoteclient_benchmark_test.go b/pkg/remoteclient/remoteclient_benchmark_test.go new file mode 100644 index 00000000000..5b87aa2907f --- /dev/null +++ b/pkg/remoteclient/remoteclient_benchmark_test.go @@ -0,0 +1,53 @@ +package remoteclient_test + +import ( + "testing" + + "github.com/openshift/hive/test/benchutil" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func benchBuilderOp(b *testing.B, fn func(*testing.B, *benchutil.BenchRemoteClient)) { + benchutil.ControllerHarness[*benchutil.BenchRemoteClient]{ + Setup: benchutil.SetupRemoteClient, + Reconcile: func(b *testing.B, brc *benchutil.BenchRemoteClient, _ []client.Object, _ int) { + fn(b, brc) + }, + }.Run(b) +} + +// BenchmarkBuilderBuild measures Build: secret read + parse + discovery + client creation. +func BenchmarkBuilderBuild(b *testing.B) { + benchBuilderOp(b, func(b *testing.B, brc *benchutil.BenchRemoteClient) { + if _, err := brc.NewBuilder().Build(); err != nil { + b.Fatalf("Build failed: %v", err) + } + }) +} + +// BenchmarkBuilderRESTConfig measures secret read + parse (no remote calls). 
+func BenchmarkBuilderRESTConfig(b *testing.B) { + benchBuilderOp(b, func(b *testing.B, brc *benchutil.BenchRemoteClient) { + if _, err := brc.NewBuilder().RESTConfig(); err != nil { + b.Fatalf("RESTConfig failed: %v", err) + } + }) +} + +// BenchmarkBuilderBuildDynamic measures BuildDynamic (no discovery). +func BenchmarkBuilderBuildDynamic(b *testing.B) { + benchBuilderOp(b, func(b *testing.B, brc *benchutil.BenchRemoteClient) { + if _, err := brc.NewBuilder().BuildDynamic(); err != nil { + b.Fatalf("BuildDynamic failed: %v", err) + } + }) +} + +// BenchmarkBuilderBuildKubeClient measures BuildKubeClient (no discovery). +func BenchmarkBuilderBuildKubeClient(b *testing.B) { + benchBuilderOp(b, func(b *testing.B, brc *benchutil.BenchRemoteClient) { + if _, err := brc.NewBuilder().BuildKubeClient(); err != nil { + b.Fatalf("BuildKubeClient failed: %v", err) + } + }) +} diff --git a/pkg/resource/benchmark_test.go b/pkg/resource/benchmark_test.go new file mode 100644 index 00000000000..0bae6b9fbe0 --- /dev/null +++ b/pkg/resource/benchmark_test.go @@ -0,0 +1,14 @@ +package resource_test + +import ( + "os" + "testing" + + "github.com/openshift/hive/test/benchutil" +) + +func TestMain(m *testing.M) { + code := m.Run() + benchutil.StopEnvTest() + os.Exit(code) +} diff --git a/pkg/resource/helper_benchmark_test.go b/pkg/resource/helper_benchmark_test.go new file mode 100644 index 00000000000..01ef2e70acf --- /dev/null +++ b/pkg/resource/helper_benchmark_test.go @@ -0,0 +1,267 @@ +package resource_test + +import ( + "context" + "fmt" + "testing" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/openshift/hive/pkg/resource" + "github.com/openshift/hive/test/benchutil" +) + +func benchHelperOp(b *testing.B, template client.Object, fn func(*testing.B, resource.Helper, client.Object)) { + benchutil.ControllerHarness[*benchutil.HelperState]{ + NewObjects: benchutil.SingleTemplate(template), + 
Setup: benchutil.SetupLocalHelper, + Reconcile: func(b *testing.B, s *benchutil.HelperState, objects []client.Object, i int) { + fn(b, s.Helper, objects[i]) + }, + SteadyState: true, + }.Run(b) +} + +// BenchmarkNewHelper measures Helper construction cost. +func BenchmarkNewHelper(b *testing.B) { + benchutil.ControllerHarness[*benchutil.HelperState]{ + Setup: benchutil.SetupLocalHelper, + Reconcile: func(b *testing.B, s *benchutil.HelperState, _ []client.Object, _ int) { + benchutil.BenchHelper(b, s.RTCounter.Cfg, "benchmark") + }, + }.Run(b) +} + +// BenchmarkApply benchmarks Apply by resource type and payload size. +func BenchmarkApply(b *testing.B) { + applyFn := func(b *testing.B, helper resource.Helper, obj client.Object) { + serialized := benchutil.MustSerialize(obj) + if _, err := helper.Apply(serialized); err != nil { + b.Fatalf("apply failed: %v", err) + } + } + + b.Run("ByType", func(b *testing.B) { + resources := []struct { + name string + obj client.Object + }{ + {"ConfigMap", benchutil.GenerateConfigMap("apply-cm", 0)}, + {"Secret", benchutil.GenerateSecret("apply-secret", 0)}, + {"Deployment", benchutil.GenerateDeployment("apply-deploy")}, + {"ServiceAccount", benchutil.GenerateServiceAccount("apply-sa")}, + } + for _, tc := range resources { + b.Run(tc.name, func(b *testing.B) { + benchHelperOp(b, tc.obj, applyFn) + }) + } + }) + + b.Run("BySize", func(b *testing.B) { + sizes := []struct { + name string + size int + }{ + {"100B", 100}, + {"1KB", 1024}, + {"10KB", 10 * 1024}, + {"100KB", 100 * 1024}, + } + for _, sz := range sizes { + b.Run(sz.name, func(b *testing.B) { + benchHelperOp(b, benchutil.GenerateConfigMap("apply-sized", sz.size), applyFn) + }) + } + }) +} + +// BenchmarkPatch benchmarks all patch types (StrategicMerge, JSON, Merge). 
+func BenchmarkPatch(b *testing.B) { + patches := []struct { + name string + patchData string + patchType string + }{ + {"StrategicMerge", `{"data":{"newkey%d":"newvalue"}}`, "strategic"}, + {"JSONPatch", `[{"op":"add","path":"/data/newkey%d","value":"newvalue"}]`, "json"}, + {"MergePatch", `{"data":{"newkey%d":"newvalue"}}`, "merge"}, + } + + for _, tc := range patches { + b.Run(tc.name, func(b *testing.B) { + var target types.NamespacedName + benchutil.ControllerHarness[*benchutil.HelperState]{ + Setup: func(b *testing.B, env *benchutil.BenchEnv) *benchutil.HelperState { + cm := benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("patch-cm", 0), env.Namespace) + if err := env.SeedClient.Create(context.Background(), cm); err != nil { + b.Fatalf("failed to seed configmap: %v", err) + } + target = types.NamespacedName{Name: "patch-cm", Namespace: env.Namespace} + return benchutil.SetupLocalHelper(b, env) + }, + Reconcile: func(b *testing.B, s *benchutil.HelperState, _ []client.Object, i int) { + patchData := []byte(fmt.Sprintf(tc.patchData, i)) + if err := s.Helper.Patch(target, "ConfigMap", "v1", patchData, tc.patchType); err != nil { + b.Fatalf("patch failed: %v", err) + } + }, + }.Run(b) + }) + } +} + +// BenchmarkApplyRuntimeObject benchmarks ApplyRuntimeObject. +func BenchmarkApplyRuntimeObject(b *testing.B) { + objects := []struct { + name string + obj client.Object + }{ + {"ConfigMap", benchutil.GenerateConfigMap("rt-cm", 0)}, + {"Secret", benchutil.GenerateSecret("rt-secret", 0)}, + {"ServiceAccount", benchutil.GenerateServiceAccount("rt-sa")}, + } + + for _, tc := range objects { + b.Run(tc.name, func(b *testing.B) { + benchHelperOp(b, tc.obj, func(b *testing.B, helper resource.Helper, obj client.Object) { + if _, err := helper.ApplyRuntimeObject(obj, scheme.Scheme); err != nil { + b.Fatalf("apply runtime object failed: %v", err) + } + }) + }) + } +} + +// BenchmarkCreateOrUpdate benchmarks CreateOrUpdate by resource type and payload size. 
+func BenchmarkCreateOrUpdate(b *testing.B) { + createOrUpdateFn := func(b *testing.B, helper resource.Helper, obj client.Object) { + serialized := benchutil.MustSerialize(obj) + if _, err := helper.CreateOrUpdate(serialized); err != nil { + b.Fatalf("createOrUpdate failed: %v", err) + } + } + + b.Run("ByType", func(b *testing.B) { + resources := []struct { + name string + obj client.Object + }{ + {"ConfigMap", benchutil.GenerateConfigMap("cou-cm", 0)}, + {"Secret", benchutil.GenerateSecret("cou-secret", 0)}, + {"Deployment", benchutil.GenerateDeployment("cou-deploy")}, + {"ServiceAccount", benchutil.GenerateServiceAccount("cou-sa")}, + } + for _, tc := range resources { + b.Run(tc.name, func(b *testing.B) { + benchHelperOp(b, tc.obj, createOrUpdateFn) + }) + } + }) + + b.Run("BySize", func(b *testing.B) { + sizes := []struct { + name string + size int + }{ + {"100B", 100}, + {"1KB", 1024}, + {"10KB", 10 * 1024}, + {"100KB", 100 * 1024}, + } + for _, sz := range sizes { + b.Run(sz.name, func(b *testing.B) { + benchHelperOp(b, benchutil.GenerateConfigMap("cou-sized", sz.size), createOrUpdateFn) + }) + } + }) +} + +// BenchmarkCreateOrUpdateRuntimeObject benchmarks CreateOrUpdateRuntimeObject. +func BenchmarkCreateOrUpdateRuntimeObject(b *testing.B) { + objects := []struct { + name string + obj client.Object + }{ + {"ConfigMap", benchutil.GenerateConfigMap("cou-rt-cm", 0)}, + {"Secret", benchutil.GenerateSecret("cou-rt-secret", 0)}, + {"ServiceAccount", benchutil.GenerateServiceAccount("cou-rt-sa")}, + } + + for _, tc := range objects { + b.Run(tc.name, func(b *testing.B) { + benchHelperOp(b, tc.obj, func(b *testing.B, helper resource.Helper, obj client.Object) { + if _, err := helper.CreateOrUpdateRuntimeObject(obj, scheme.Scheme); err != nil { + b.Fatalf("createOrUpdate runtime object failed: %v", err) + } + }) + }) + } +} + +// BenchmarkCreate benchmarks Create (FirstCreate and AlreadyExists paths). 
+func BenchmarkCreate(b *testing.B) { + b.Run("FirstCreate", func(b *testing.B) { + benchutil.ControllerHarness[*benchutil.HelperState]{ + NewObjects: func(b *testing.B, ns string) []client.Object { + objects := make([]client.Object, b.N) + for i := range objects { + objects[i] = benchutil.CopyAndSetNamespace( + benchutil.GenerateConfigMap(fmt.Sprintf("create-%d", i), 0), ns) + } + return objects + }, + Setup: benchutil.SetupLocalHelper, + Reconcile: func(b *testing.B, s *benchutil.HelperState, objects []client.Object, i int) { + serialized := benchutil.MustSerialize(objects[i]) + if _, err := s.Helper.Create(serialized); err != nil { + b.Fatalf("create failed: %v", err) + } + }, + }.Run(b) + }) + + b.Run("AlreadyExists", func(b *testing.B) { + benchutil.ControllerHarness[*benchutil.HelperState]{ + NewObjects: benchutil.SingleTemplate(benchutil.GenerateConfigMap("create-existing", 0)), + Setup: func(b *testing.B, env *benchutil.BenchEnv) *benchutil.HelperState { + obj := benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("create-existing", 0), env.Namespace) + if err := env.SeedClient.Create(context.Background(), obj); err != nil { + b.Fatalf("failed to seed resource: %v", err) + } + return benchutil.SetupLocalHelper(b, env) + }, + Reconcile: func(b *testing.B, s *benchutil.HelperState, objects []client.Object, i int) { + serialized := benchutil.MustSerialize(objects[i]) + if _, err := s.Helper.Create(serialized); err != nil { + b.Fatalf("create failed: %v", err) + } + }, + }.Run(b) + }) +} + +// BenchmarkDelete benchmarks Delete with pre-seeded resources. 
+func BenchmarkDelete(b *testing.B) { + var ns string + benchutil.ControllerHarness[*benchutil.HelperState]{ + Setup: func(b *testing.B, env *benchutil.BenchEnv) *benchutil.HelperState { + ns = env.Namespace + for i := 0; i < b.N; i++ { + cm := benchutil.CopyAndSetNamespace( + benchutil.GenerateConfigMap(fmt.Sprintf("delete-%d", i), 0), ns) + if err := env.SeedClient.Create(context.Background(), cm); err != nil { + b.Fatalf("failed to seed resource: %v", err) + } + } + return benchutil.SetupLocalHelper(b, env) + }, + Reconcile: func(b *testing.B, s *benchutil.HelperState, _ []client.Object, i int) { + if err := s.Helper.Delete("v1", "ConfigMap", ns, fmt.Sprintf("delete-%d", i)); err != nil { + b.Fatalf("delete failed: %v", err) + } + }, + }.Run(b) +} diff --git a/test/benchmark/README.md b/test/benchmark/README.md new file mode 100644 index 00000000000..457b54e6b16 --- /dev/null +++ b/test/benchmark/README.md @@ -0,0 +1,121 @@ +# Hive Resource/RemoteClient Benchmarks + +Performance benchmarks for validating safe optimizations to Hive's legacy +`pkg/resource` (`Helper`) and `pkg/remoteclient` packages. + +## Context + +The clustersync controller and others that depend on these packages have been +repeatedly flagged by downstream users for slow reconciliation, memory leaks, +and excessive API server load. These packages are legacy code with behavioral +dependencies that prevent rewrites. + +These benchmarks enable targeted optimizations by: +1. Identifying hot paths through round-trip and allocation measurement +2. Proving performance improvements via A/B comparison +3. Detecting behavioral regressions - unexpected metric changes indicate logic changes + +## Quick Start + +```bash +# One-time setup +make install-tools +make setup-envtest +export KUBEBUILDER_ASSETS="$(hack/setup-envtest.sh | tail -1 | cut -d'=' -f2 | tr -d '"')" + +# Run all benchmarks +go test -bench=. 
-benchmem -benchtime=5x -count=6 \ + ./pkg/remoteclient/ ./pkg/resource/ ./test/benchmark/ + +# Compare before/after a change +./hack/benchmark-comparison.sh [base-commit] +``` + +## What's Measured + +Each benchmark reports: +- Wall time - Total operation duration +- Allocations - Memory allocations per operation (MB/op, allocs/op) +- Round trips - HTTP calls to API server (custom metric) +- Bytes transferred - Network traffic (custom metric) + +Key insight: Round-trip count changes indicate behavioral changes. +If your optimization changes round-trip count, it's not just a performance +improvement -- it's a logic change that needs careful review. + +### Benchmark Coverage + +| Package | What it measures | +|---------------------|----------------------------------------------------------------------------| +| `pkg/remoteclient/` | RESTConfig parsing, client creation, discovery | +| `pkg/resource/` | Apply, Patch, Delete, CreateOrUpdate operations | +| `test/benchmark/` | Full reconciliation patterns (clustersync, machinepool, hibernation, etc.) | + +## Validating Optimizations + +```bash +# 1. Make your optimization +vim pkg/resource/apply.go + +# 2. Run comparison against base (main branch or HEAD~1) +./hack/benchmark-comparison.sh main + +# Or specify custom settings +BENCHTIME=10x COUNT=8 ./hack/benchmark-comparison.sh +``` + +## Architecture + +Benchmarks run against a real kube-apiserver + etcd (via controller-runtime's envtest), +not mocks, giving accurate round-trip and byte measurements. Each benchmark uses +`benchutil.ControllerHarness` -- a generic primitive that handles environment setup, +object seeding, counter management, and metric reporting. + +For detailed internals (envtest lifecycle, harness field semantics, round-trip counting, +namespace isolation, the full benchmark catalog, and measurement caveats) see: + +```bash +go doc github.com/openshift/hive/test/benchutil +``` + +## Adding Benchmarks + +Most common patterns are already covered. 
To add a new one: + +```go +// In pkg/resource/helper_benchmark_test.go +func BenchmarkMyOperation(b *testing.B) { + benchutil.ControllerHarness[*benchutil.HelperState]{ + NewObjects: benchutil.SingleTemplate(benchutil.GenerateConfigMap("test", 0)), + Setup: benchutil.SetupLocalHelper, + Reconcile: func(b *testing.B, s *benchutil.HelperState, objs []client.Object, i int) { + if _, err := s.Helper.MyOperation(objs[i]); err != nil { + b.Fatalf("MyOperation failed: %v", err) + } + }, + SteadyState: true, + }.Run(b) +} +``` + +See `test/benchmark/controller_sim_benchmark_test.go` for full controller reconciliation examples. + +## Troubleshooting + +### `KUBEBUILDER_ASSETS` not set: +```bash +make setup-envtest +export KUBEBUILDER_ASSETS="$(hack/setup-envtest.sh | tail -1 | cut -d'=' -f2 | tr -d '"')" +``` + +### Benchmark variance too high: +- Increase iteration count: `-benchtime=10x` +- Increase run count: `-count=10` +- Close resource-intensive programs + +### `envtest` startup failure: +```bash +# Verify setup-envtest installation +setup-envtest list +setup-envtest use -p path +``` diff --git a/test/benchmark/benchmark_test.go b/test/benchmark/benchmark_test.go new file mode 100644 index 00000000000..7edbf34f8d4 --- /dev/null +++ b/test/benchmark/benchmark_test.go @@ -0,0 +1,14 @@ +package benchmark + +import ( + "os" + "testing" + + "github.com/openshift/hive/test/benchutil" +) + +func TestMain(m *testing.M) { + code := m.Run() + benchutil.StopEnvTest() + os.Exit(code) +} diff --git a/test/benchmark/controller_sim_benchmark_test.go b/test/benchmark/controller_sim_benchmark_test.go new file mode 100644 index 00000000000..f032c36b91d --- /dev/null +++ b/test/benchmark/controller_sim_benchmark_test.go @@ -0,0 +1,459 @@ +package benchmark + +import ( + "context" + "fmt" + "testing" + + configv1 "github.com/openshift/api/config/v1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + "github.com/openshift/hive/test/benchutil" + corev1 "k8s.io/api/core/v1" + 
apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Common ClusterSync reconcile functions (RESTConfig + Helper per iteration). + +func clusterSyncApply(b *testing.B, brc *benchutil.BenchRemoteClient, objects []client.Object, _ int) { + restCfg, err := brc.NewBuilder().RESTConfig() + if err != nil { + b.Fatalf("RESTConfig failed: %v", err) + } + helper := benchutil.BenchHelper(b, restCfg, "benchmark") + for _, obj := range objects { + if _, err := helper.Apply(benchutil.MustSerialize(obj)); err != nil { + b.Fatalf("apply failed: %v", err) + } + } +} + +func clusterSyncCreateOrUpdate(b *testing.B, brc *benchutil.BenchRemoteClient, objects []client.Object, _ int) { + restCfg, err := brc.NewBuilder().RESTConfig() + if err != nil { + b.Fatalf("RESTConfig failed: %v", err) + } + helper := benchutil.BenchHelper(b, restCfg, "benchmark") + for _, obj := range objects { + if _, err := helper.CreateOrUpdate(benchutil.MustSerialize(obj)); err != nil { + b.Fatalf("createOrUpdate failed: %v", err) + } + } +} + +// Pattern 1: RESTConfig -> resource.Helper -- models clustersync. 
+ +func BenchmarkControllerReconcileClusterSync(b *testing.B) { + b.Run("Small", func(b *testing.B) { + benchutil.ControllerHarness[*benchutil.BenchRemoteClient]{ + NewObjects: func(_ *testing.B, ns string) []client.Object { + return []client.Object{ + benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("syncset-cm", 0), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateSecret("syncset-secret", 0), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateDeployment("syncset-deploy"), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateServiceAccount("syncset-sa"), ns), + } + }, + Setup: benchutil.SetupRemoteClient, + Reconcile: clusterSyncApply, + SteadyState: true, + }.Run(b) + }) + + b.Run("Large", func(b *testing.B) { + benchutil.ControllerHarness[*benchutil.BenchRemoteClient]{ + NewObjects: func(_ *testing.B, ns string) []client.Object { + // 5 ConfigMaps + 4 Secrets + 3 Deployments + 3 ServiceAccounts + 2 large ConfigMaps = 17 + objects := make([]client.Object, 0, 17) + for i := 0; i < 5; i++ { + objects = append(objects, benchutil.CopyAndSetNamespace( + benchutil.GenerateConfigMap(fmt.Sprintf("config-%d", i), 1024), ns)) + } + for i := 0; i < 4; i++ { + objects = append(objects, benchutil.CopyAndSetNamespace( + benchutil.GenerateSecret(fmt.Sprintf("secret-%d", i), 512), ns)) + } + for i := 0; i < 3; i++ { + objects = append(objects, benchutil.CopyAndSetNamespace( + benchutil.GenerateDeployment(fmt.Sprintf("deploy-%d", i)), ns)) + } + for i := 0; i < 3; i++ { + objects = append(objects, benchutil.CopyAndSetNamespace( + benchutil.GenerateServiceAccount(fmt.Sprintf("sa-%d", i)), ns)) + } + objects = append(objects, + benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("ca-bundle", 10*1024), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("install-script", 5*1024), ns), + ) + return objects + }, + Setup: benchutil.SetupRemoteClient, + Reconcile: clusterSyncApply, + SteadyState: true, + }.Run(b) + }) + + b.Run("WithDelete", 
func(b *testing.B) { + benchutil.ControllerHarness[*benchutil.BenchRemoteClient]{ + NewObjects: func(_ *testing.B, ns string) []client.Object { + return []client.Object{ + benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("active-cm", 0), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateSecret("active-secret", 0), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateServiceAccount("active-sa"), ns), + } + }, + Setup: func(b *testing.B, env *benchutil.BenchEnv) *benchutil.BenchRemoteClient { + for i := 0; i < b.N; i++ { + cm := benchutil.CopyAndSetNamespace( + benchutil.GenerateConfigMap(fmt.Sprintf("stale-config-%d", i), 0), env.Namespace) + if err := env.SeedClient.Create(context.Background(), cm); err != nil { + b.Fatalf("failed to seed stale resource: %v", err) + } + } + return benchutil.SetupRemoteClient(b, env) + }, + Reconcile: func(b *testing.B, brc *benchutil.BenchRemoteClient, objects []client.Object, i int) { + restCfg, err := brc.NewBuilder().RESTConfig() + if err != nil { + b.Fatalf("RESTConfig failed: %v", err) + } + helper := benchutil.BenchHelper(b, restCfg, "benchmark") + for _, obj := range objects { + if _, err := helper.Apply(benchutil.MustSerialize(obj)); err != nil { + b.Fatalf("apply failed: %v", err) + } + } + if err := helper.Delete("v1", "ConfigMap", objects[0].GetNamespace(), fmt.Sprintf("stale-config-%d", i)); err != nil { + b.Fatalf("delete failed: %v", err) + } + }, + }.Run(b) + }) + + // WithStaticPatch: unchanging patch data (idempotent after first iteration). 
+ b.Run("WithStaticPatch", func(b *testing.B) { + benchutil.ControllerHarness[*benchutil.BenchRemoteClient]{ + NewObjects: func(_ *testing.B, ns string) []client.Object { + return []client.Object{ + benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("active-cm", 0), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateSecret("active-secret", 0), ns), + } + }, + Setup: func(b *testing.B, env *benchutil.BenchEnv) *benchutil.BenchRemoteClient { + target := benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("patch-target", 0), env.Namespace) + if err := env.SeedClient.Create(context.Background(), target); err != nil { + b.Fatalf("failed to seed patch target: %v", err) + } + return benchutil.SetupRemoteClient(b, env) + }, + Reconcile: func(b *testing.B, brc *benchutil.BenchRemoteClient, objects []client.Object, _ int) { + restCfg, err := brc.NewBuilder().RESTConfig() + if err != nil { + b.Fatalf("RESTConfig failed: %v", err) + } + helper := benchutil.BenchHelper(b, restCfg, "benchmark") + for _, obj := range objects { + if _, err := helper.Apply(benchutil.MustSerialize(obj)); err != nil { + b.Fatalf("apply failed: %v", err) + } + } + if err := helper.Patch(types.NamespacedName{ + Name: "patch-target", Namespace: objects[0].GetNamespace(), + }, "ConfigMap", "v1", []byte(`{"data":{"patched":"true"}}`), "strategic"); err != nil { + b.Fatalf("patch failed: %v", err) + } + }, + }.Run(b) + }) + + // WithDynamicPatch: patch data varies per iteration, forcing real writes. 
+ b.Run("WithDynamicPatch", func(b *testing.B) { + benchutil.ControllerHarness[*benchutil.BenchRemoteClient]{ + NewObjects: func(_ *testing.B, ns string) []client.Object { + return []client.Object{ + benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("active-cm", 0), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateSecret("active-secret", 0), ns), + } + }, + Setup: func(b *testing.B, env *benchutil.BenchEnv) *benchutil.BenchRemoteClient { + target := benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("patch-target", 0), env.Namespace) + if err := env.SeedClient.Create(context.Background(), target); err != nil { + b.Fatalf("failed to seed patch target: %v", err) + } + return benchutil.SetupRemoteClient(b, env) + }, + Reconcile: func(b *testing.B, brc *benchutil.BenchRemoteClient, objects []client.Object, i int) { + restCfg, err := brc.NewBuilder().RESTConfig() + if err != nil { + b.Fatalf("RESTConfig failed: %v", err) + } + helper := benchutil.BenchHelper(b, restCfg, "benchmark") + for _, obj := range objects { + if _, err := helper.Apply(benchutil.MustSerialize(obj)); err != nil { + b.Fatalf("apply failed: %v", err) + } + } + if err := helper.Patch(types.NamespacedName{ + Name: "patch-target", Namespace: objects[0].GetNamespace(), + }, "ConfigMap", "v1", []byte(fmt.Sprintf(`{"data":{"newkey%d":"value"}}`, i)), "strategic"); err != nil { + b.Fatalf("patch failed: %v", err) + } + }, + }.Run(b) + }) +} + +// BenchmarkControllerReconcileClusterSyncCreateOrUpdate is an A/B comparison with Apply. 
+func BenchmarkControllerReconcileClusterSyncCreateOrUpdate(b *testing.B) { + b.Run("Small", func(b *testing.B) { + benchutil.ControllerHarness[*benchutil.BenchRemoteClient]{ + NewObjects: func(_ *testing.B, ns string) []client.Object { + return []client.Object{ + benchutil.CopyAndSetNamespace(benchutil.GenerateConfigMap("cou-cm", 0), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateSecret("cou-secret", 0), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateDeployment("cou-deploy"), ns), + benchutil.CopyAndSetNamespace(benchutil.GenerateServiceAccount("cou-sa"), ns), + } + }, + Setup: benchutil.SetupRemoteClient, + Reconcile: clusterSyncCreateOrUpdate, + SteadyState: true, + }.Run(b) + }) +} + +// Pattern 2: Build -> Get (read-only) -- models clusterversion, clusterstate. + +func BenchmarkControllerReconcileClusterVersion(b *testing.B) { + benchutil.ControllerHarness[*benchutil.BenchRemoteClient]{ + Setup: func(b *testing.B, env *benchutil.BenchEnv) *benchutil.BenchRemoteClient { + // Seed a ClusterVersion to read (cluster-scoped singleton). 
+ cv := &configv1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{Name: "version"}, + Spec: configv1.ClusterVersionSpec{ + ClusterID: "bench-cluster-id", + }, + } + if err := env.SeedClient.Get(context.Background(), client.ObjectKeyFromObject(cv), cv); apierrors.IsNotFound(err) { + if err := env.SeedClient.Create(context.Background(), cv); err != nil { + b.Fatalf("failed to seed ClusterVersion: %v", err) + } + } else if err != nil { + b.Fatalf("failed to check for existing ClusterVersion: %v", err) + } + return benchutil.SetupRemoteClient(b, env) + }, + Reconcile: func(b *testing.B, brc *benchutil.BenchRemoteClient, _ []client.Object, _ int) { + remoteClient, err := brc.NewBuilder().Build() + if err != nil { + b.Fatalf("Build failed: %v", err) + } + got := &configv1.ClusterVersion{} + if err := remoteClient.Get(context.Background(), client.ObjectKey{Name: "version"}, got); err != nil { + b.Fatalf("Get failed: %v", err) + } + }, + }.Run(b) +} + +// Pattern 3: Build -> CRUD -- models machinepool. +// Separate harness calls per mode because FirstApply creates and SteadyState updates. 

func BenchmarkControllerReconcileMachinePool(b *testing.B) {
	// firstApplyState carries the dual round-trip counters plus the isolated
	// namespace so each iteration can target a uniquely-named MachineSet.
	type firstApplyState struct {
		*benchutil.BenchRemoteClient
		ns string
	}
	// FirstApply: every iteration performs the reads a machinepool reconcile
	// does (Get target, List Machines, List MachineSets) and then Creates a
	// brand-new MachineSet named per iteration index.
	b.Run("FirstApply", func(b *testing.B) {
		benchutil.ControllerHarness[*firstApplyState]{
			Setup: func(b *testing.B, env *benchutil.BenchEnv) *firstApplyState {
				return &firstApplyState{
					BenchRemoteClient: benchutil.SetupRemoteClient(b, env),
					ns:                env.Namespace,
				}
			},
			Reconcile: func(b *testing.B, s *firstApplyState, _ []client.Object, i int) {
				remoteClient, err := s.NewBuilder().Build()
				if err != nil {
					b.Fatalf("Build failed: %v", err)
				}
				workerName := fmt.Sprintf("worker-%d", i)

				// Probe for the target; NotFound is the expected outcome on
				// first apply, so only unexpected errors are fatal.
				ms := &machinev1beta1.MachineSet{}
				if err := remoteClient.Get(context.Background(), client.ObjectKey{
					Namespace: s.ns, Name: workerName,
				}, ms); err != nil && !apierrors.IsNotFound(err) {
					b.Fatalf("Get MachineSet failed: %v", err)
				}

				machineList := &machinev1beta1.MachineList{}
				if err := remoteClient.List(context.Background(), machineList, client.InNamespace(s.ns)); err != nil {
					b.Fatalf("List Machines failed: %v", err)
				}

				machineSetList := &machinev1beta1.MachineSetList{}
				if err := remoteClient.List(context.Background(), machineSetList, client.InNamespace(s.ns)); err != nil {
					b.Fatalf("List MachineSets failed: %v", err)
				}

				// Create the uniquely-named MachineSet for this iteration.
				replicas := int32(3)
				toCreate := &machinev1beta1.MachineSet{
					ObjectMeta: metav1.ObjectMeta{
						Name:      workerName,
						Namespace: s.ns,
					},
					Spec: machinev1beta1.MachineSetSpec{
						Replicas: &replicas,
						Selector: metav1.LabelSelector{
							MatchLabels: map[string]string{"machine.openshift.io/cluster-api-machineset": workerName},
						},
					},
				}
				if err := remoteClient.Create(context.Background(), toCreate); err != nil {
					b.Fatalf("Create failed: %v", err)
				}
			},
		}.Run(b)
	})

	// steadyState additionally keeps the pre-seeded MachineSet so each
	// iteration can Get it by key and Update it in place.
	type steadyState struct {
		*benchutil.BenchRemoteClient
		ns       string
		existing *machinev1beta1.MachineSet
	}
	// SteadyState: one MachineSet is seeded up front; the measured loop does
	// the same reads as FirstApply but ends with an Update instead of Create.
	b.Run("SteadyState", func(b *testing.B) {
		benchutil.ControllerHarness[*steadyState]{
			Setup: func(b *testing.B, env *benchutil.BenchEnv) *steadyState {
				replicas := int32(3)
				ms := &machinev1beta1.MachineSet{
					ObjectMeta: metav1.ObjectMeta{Name: "worker", Namespace: env.Namespace},
					Spec: machinev1beta1.MachineSetSpec{
						Replicas: &replicas,
						Selector: metav1.LabelSelector{
							MatchLabels: map[string]string{"machine.openshift.io/cluster-api-machineset": "worker"},
						},
					},
				}
				if err := env.SeedClient.Create(context.Background(), ms); err != nil {
					b.Fatalf("failed to seed MachineSet: %v", err)
				}
				return &steadyState{
					BenchRemoteClient: benchutil.SetupRemoteClient(b, env),
					ns:                env.Namespace,
					existing:          ms,
				}
			},
			Reconcile: func(b *testing.B, s *steadyState, _ []client.Object, i int) {
				remoteClient, err := s.NewBuilder().Build()
				if err != nil {
					b.Fatalf("Build failed: %v", err)
				}

				// Re-fetch rather than reusing s.existing so the Update sends
				// a current resourceVersion.
				got := &machinev1beta1.MachineSet{}
				if err := remoteClient.Get(context.Background(), client.ObjectKeyFromObject(s.existing), got); err != nil {
					b.Fatalf("Get failed: %v", err)
				}

				machineList := &machinev1beta1.MachineList{}
				if err := remoteClient.List(context.Background(), machineList, client.InNamespace(s.ns)); err != nil {
					b.Fatalf("List Machines failed: %v", err)
				}

				machineSetList := &machinev1beta1.MachineSetList{}
				if err := remoteClient.List(context.Background(), machineSetList, client.InNamespace(s.ns)); err != nil {
					b.Fatalf("List MachineSets failed: %v", err)
				}

				// Vary replicas per iteration so every Update is a real write,
				// never a no-op the server could short-circuit.
				newReplicas := int32(i + 1)
				got.Spec.Replicas = &newReplicas
				if err := remoteClient.Update(context.Background(), got); err != nil {
					b.Fatalf("Update failed: %v", err)
				}
			},
		}.Run(b)
	})
}

// Pattern 4: Build + BuildKubeClient -- models hibernation.
+ +func BenchmarkControllerReconcileHibernation(b *testing.B) { + benchutil.ControllerHarness[*benchutil.BenchRemoteClient]{ + Setup: func(b *testing.B, env *benchutil.BenchEnv) *benchutil.BenchRemoteClient { + for i := 0; i < 3; i++ { + machine := &machinev1beta1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("worker-%d", i), + Namespace: env.Namespace, + }, + } + if err := env.SeedClient.Create(context.Background(), machine); err != nil { + b.Fatalf("failed to seed Machine: %v", err) + } + } + return benchutil.SetupRemoteClient(b, env) + }, + Reconcile: func(b *testing.B, brc *benchutil.BenchRemoteClient, _ []client.Object, _ int) { + // First client: controller-runtime (Build) + remoteClient, err := brc.NewBuilder().Build() + if err != nil { + b.Fatalf("Build failed: %v", err) + } + + machineList := &machinev1beta1.MachineList{} + if err := remoteClient.List(context.Background(), machineList); err != nil { + b.Fatalf("List Machines failed: %v", err) + } + + nodeList := &corev1.NodeList{} + if err := remoteClient.List(context.Background(), nodeList); err != nil { + b.Fatalf("List Nodes failed: %v", err) + } + + coList := &configv1.ClusterOperatorList{} + if err := remoteClient.List(context.Background(), coList); err != nil { + b.Fatalf("List ClusterOperators failed: %v", err) + } + + // Second client: typed kubernetes (BuildKubeClient) + kubeClient, err := brc.NewBuilder().BuildKubeClient() + if err != nil { + b.Fatalf("BuildKubeClient failed: %v", err) + } + + if _, err := kubeClient.CertificatesV1().CertificateSigningRequests().List(context.Background(), metav1.ListOptions{}); err != nil { + b.Fatalf("kube List CSRs failed: %v", err) + } + }, + }.Run(b) +} + +// Pattern 5: UsePrimaryAPIURL().Build() -- models unreachable. 
+ +func BenchmarkControllerReconcileUnreachable(b *testing.B) { + benchutil.ControllerHarness[*benchutil.BenchRemoteClient]{ + Setup: benchutil.SetupRemoteClient, + Reconcile: func(b *testing.B, brc *benchutil.BenchRemoteClient, _ []client.Object, _ int) { + if _, err := brc.NewBuilder().UsePrimaryAPIURL().Build(); err != nil { + b.Fatalf("Build failed: %v", err) + } + }, + }.Run(b) +} + +// Pattern 6: Local-only Helper -- models controlplanecerts, remoteingress. + +func BenchmarkControllerReconcileControlPlaneCerts(b *testing.B) { + benchutil.ControllerHarness[*benchutil.HelperState]{ + NewObjects: benchutil.SingleTemplate(benchutil.GenerateSyncSet("controlplane-certs")), + Setup: benchutil.SetupLocalHelper, + Reconcile: func(b *testing.B, s *benchutil.HelperState, objects []client.Object, i int) { + if _, err := s.Helper.ApplyRuntimeObject(objects[i], scheme.Scheme); err != nil { + b.Fatalf("ApplyRuntimeObject failed: %v", err) + } + }, + SteadyState: true, + }.Run(b) +} diff --git a/test/benchutil/benchenv.go b/test/benchutil/benchenv.go new file mode 100644 index 00000000000..42fff828ea4 --- /dev/null +++ b/test/benchutil/benchenv.go @@ -0,0 +1,97 @@ +package benchutil + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/controller-runtime/pkg/client" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + "github.com/openshift/hive/pkg/constants" +) + +// BenchEnv bundles config, client, ClusterDeployment, and namespace for a benchmark. +type BenchEnv struct { + // Cfg is the base envtest REST config. + Cfg *rest.Config + + // SeedClient is a shared controller-runtime client to the envtest cluster. + // It should be used to seed data necessary for the benchmarks. It should NOT + // be used in the actual benchmarks. 
+ SeedClient client.Client + + // CD is an in-memory ClusterDeployment with a kubeconfig Secret + // pointing at the envtest cluster. Only the Secret is created. + CD *hivev1.ClusterDeployment + + // Namespace is a unique namespace for benchmark isolation. + Namespace string +} + +// NewBenchEnv creates a fully initialized benchmark environment. +func NewBenchEnv(b *testing.B) *BenchEnv { + b.Helper() + cfg := EnsureEnvTest(b) + ns := BenchNamespace(b, cfg) + + // Build a kubeconfig Secret pointing at the envtest API server. + kubeconfig := clientcmdapi.Config{ + Clusters: map[string]*clientcmdapi.Cluster{ + "envtest": { + Server: cfg.Host, + CertificateAuthorityData: cfg.CAData, + }, + }, + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "envtest": { + ClientCertificateData: cfg.CertData, + ClientKeyData: cfg.KeyData, + }, + }, + Contexts: map[string]*clientcmdapi.Context{ + "envtest": {Cluster: "envtest", AuthInfo: "envtest"}, + }, + CurrentContext: "envtest", + } + kubeconfigBytes, err := clientcmd.Write(kubeconfig) + if err != nil { + b.Fatalf("failed to serialize kubeconfig: %v", err) + } + + secretName := "admin-kubeconfig" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: secretName, Namespace: ns}, + Data: map[string][]byte{ + constants.KubeconfigSecretKey: kubeconfigBytes, + }, + } + if err := seedClient.Create(context.Background(), secret); err != nil { + b.Fatalf("failed to create kubeconfig secret: %v", err) + } + + cd := &hivev1.ClusterDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bench-cluster", + Namespace: ns, + }, + Spec: hivev1.ClusterDeploymentSpec{ + ClusterMetadata: &hivev1.ClusterMetadata{ + AdminKubeconfigSecretRef: corev1.LocalObjectReference{ + Name: secretName, + }, + }, + }, + } + + return &BenchEnv{ + Cfg: cfg, + SeedClient: seedClient, + CD: cd, + Namespace: ns, + } +} diff --git a/test/benchutil/client.go b/test/benchutil/client.go new file mode 100644 index 00000000000..6ed14dc297e --- /dev/null +++ 
b/test/benchutil/client.go @@ -0,0 +1,74 @@ +package benchutil + +import ( + "fmt" + "testing" + + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + "github.com/openshift/hive/pkg/remoteclient" + hivescheme "github.com/openshift/hive/pkg/util/scheme" +) + +// BenchRemoteClient bundles dual local/remote round trip counting with builder creation. +type BenchRemoteClient struct { + env *BenchEnv + controllerName hivev1.ControllerName + localRTs *RTCounter + localClient client.Client + remoteRTs *RTCounter +} + +// NewBenchRemoteClient creates dual counters and a counting local client. +func NewBenchRemoteClient(b *testing.B, env *BenchEnv, controllerName string) *BenchRemoteClient { + b.Helper() + localRTs := TrackRoundTrips(env.Cfg) + localClient, err := newClient(localRTs.Cfg) + if err != nil { + b.Fatalf("failed to create local client: %v", err) + } + return &BenchRemoteClient{ + env: env, + controllerName: hivev1.ControllerName(controllerName), + localRTs: localRTs, + localClient: localClient, + remoteRTs: NewRTCounter(), + } +} + +// NewBuilder creates a Builder with the remote counter wired in. +func (brc *BenchRemoteClient) NewBuilder() remoteclient.Builder { + return remoteclient.NewBuilder(brc.localClient, brc.env.CD, brc.controllerName, + remoteclient.WithTransportWrapper(brc.remoteRTs.WrapTransport)) +} + +// ResetAll zeros both counters and resets the benchmark timer. +func (brc *BenchRemoteClient) ResetAll(b *testing.B) { + brc.localRTs.reset() + brc.remoteRTs.reset() + b.ResetTimer() +} + +// Report emits local and remote metrics. 
+func (brc *BenchRemoteClient) Report(b *testing.B) { + brc.localRTs.ReportAs(b, "local") + brc.remoteRTs.ReportAs(b, "remote") +} + +func newClient(cfg *rest.Config) (client.Client, error) { + httpClient, err := rest.HTTPClientFor(cfg) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP client: %w", err) + } + mapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) + if err != nil { + return nil, fmt.Errorf("failed to create REST mapper: %w", err) + } + return client.New(cfg, client.Options{ + Scheme: hivescheme.GetScheme(), + Mapper: mapper, + }) +} diff --git a/test/benchutil/doc.go b/test/benchutil/doc.go new file mode 100644 index 00000000000..fb384305fde --- /dev/null +++ b/test/benchutil/doc.go @@ -0,0 +1,104 @@ +// Package benchutil provides shared infrastructure for Hive's benchmark suites. +// +// Three packages use this infrastructure: +// +// - pkg/remoteclient/ -- Builder operations (RESTConfig, Build, BuildDynamic, BuildKubeClient) +// - pkg/resource/ -- Isolated Helper operations (Apply, CreateOrUpdate, Create, Delete, Patch, NewHelper) +// - test/benchmark/ -- Full-stack controller simulations (remoteclient + Helper) +// +// See test/benchmark/README.md for quick-start instructions and the user-facing guide. +// This doc covers the internals. +// +// # envtest +// +// Benchmarks run against a real Kubernetes API server (etcd + kube-apiserver) via +// controller-runtime's envtest. Apply, Patch, and Delete go through real API server +// validation, storage, and admission -- not mocks. There is no kubelet or controller +// manager; only the API server and etcd are running. +// +// Hive CRDs are loaded from config/crds/. OpenShift platform CRDs (ClusterVersion, +// ClusterOperator, Machine, MachineSet) are built at runtime as minimal +// CustomResourceDefinition objects with x-kubernetes-preserve-unknown-fields -- no +// checked-in YAML or controller-gen dependency needed. See [openshiftCRDs] in envtest.go. 
+// +// Each benchmark package gets its own envtest instance. Within a package, a single +// instance is shared across all benchmarks. It is started lazily on the first call to +// [EnsureEnvTest] (via sync.Once) and torn down in that package's TestMain. This means: +// +// - Running only unit tests (e.g. go test -run TestFoo ./pkg/resource/) never starts envtest. +// - Running any benchmark starts envtest exactly once per package, regardless of how many benchmarks execute. +// - API server state accumulates across benchmarks -- namespace isolation (below) prevents interference. +// +// # ControllerHarness +// +// [ControllerHarness] is the central benchmark primitive. Every benchmark across all +// three packages uses it. It handles environment creation, object seeding, counter +// management, b.ReportAllocs(), timer reset, the benchmark loop, and metric reporting -- +// leaving each benchmark to define only what varies. +// +// Fields: +// +// - NewObjects -- optional factory that creates the objects to reconcile. Receives +// *testing.B (for b.N access when creating per-iteration unique objects) and the +// isolated namespace. When nil, Reconcile receives a nil slice. +// +// - Setup -- creates per-sub-benchmark state. Must return a [Resettable] (satisfied by +// [RTCounter], [BenchRemoteClient], and [HelperState]). Custom seeding goes here. +// +// - Reconcile -- runs one iteration. Receives typed state, the full object list, and +// the iteration index. When NewObjects returns b.N uniquely-named objects (e.g. via +// [SingleTemplate]), use objects[i]. When it returns a fixed set to apply every +// iteration, iterate over the whole slice and ignore i. +// +// - SteadyState -- when false (default), Run executes at the current benchmark level +// with FirstApply semantics (objects do not exist). When true, Run creates FirstApply +// and SteadyState sub-benchmarks, where SteadyState pre-seeds all objects via +// SeedClient.Create before measurement. 
+// +// Common Setup functions: +// +// - [SetupLocalHelper] -- shared Helper + single RTCounter (local-only benchmarks) +// - [SetupRemoteClient] -- dual counters for remoteclient benchmarks +// - [SingleTemplate] -- NewObjects factory producing b.N uniquely-named copies of a template +// +// # BenchEnv and namespace isolation +// +// [NewBenchEnv] creates a fully initialized environment in one call: starts envtest (if +// not already running), creates an isolated namespace (bench-1, bench-2, ... via atomic +// counter), builds a shared controller-runtime client (for resource seeding), and sets up +// a kubeconfig Secret + in-memory ClusterDeployment for remoteclient use. +// +// The ClusterDeployment and its associated kubeconfig Secret point back at the envtest +// API server, so the same server acts as both the "management" cluster (where Secrets +// live) and the "remote" cluster (where resources are applied). +// +// # Round-trip and byte counting +// +// Every benchmark reports custom metrics tracking HTTP round trips and bytes transferred +// via [RTCounter]. Both RTCounter and [BenchRemoteClient] satisfy the [Resettable] +// interface used by the harness. +// +// BenchRemoteClient wraps two RTCounter instances -- one for management cluster API calls +// (kubeconfig Secret reads) and one for remote cluster calls (discovery, Apply, Patch, +// Delete). These appear as local-roundtrips/op, remote-roundtrips/op, etc. in benchmark +// output. +// +// # What the benchmarks don't measure +// +// Prometheus metrics transport: In production, some controllers create their +// resource.Helper with resource.WithMetrics(), which wraps the HTTP transport with a +// Prometheus counter. The benchmarks omit this wrapper, but the overhead is negligible +// (an atomic counter increment per round trip). +// +// Controller metrics transport: remoteclient.Builder.RESTConfig() injects a controller +// metrics transport via AddControllerMetricsTransportWrapper. 
This IS present in the +// benchmarks (the real remoteclient code runs), so remote round trips include this +// overhead. +// +// Discovery disk cache: resource.Helper uses a disk-cached discovery client with a +// 10-minute TTL. The first iteration of a benchmark warms this cache; subsequent +// iterations benefit from it. This matches production behavior (the cache persists across +// reconciles), but the reported roundtrips/op is an amortized average -- the first +// iteration has more HTTP round trips than later ones. For small b.N values, per-op +// costs will skew higher. +package benchutil diff --git a/test/benchutil/envtest.go b/test/benchutil/envtest.go new file mode 100644 index 00000000000..2d1550c530a --- /dev/null +++ b/test/benchutil/envtest.go @@ -0,0 +1,155 @@ +package benchutil + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "testing" + + "github.com/go-logr/logr" + "github.com/gobuffalo/flect" + configv1 "github.com/openshift/api/config/v1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + log "github.com/sirupsen/logrus" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + hivescheme "github.com/openshift/hive/pkg/util/scheme" + "k8s.io/client-go/kubernetes/scheme" +) + +var ( + testEnv *envtest.Environment + testRESTConfig *rest.Config + seedClient client.Client // initialized in envtestOnce; see newClient in client.go + envtestOnce sync.Once +) + +// EnsureEnvTest lazily starts a shared envtest environment. Torn down via StopEnvTest. 
+func EnsureEnvTest(b *testing.B) *rest.Config { + b.Helper() + envtestOnce.Do(func() { + // Verify KUBEBUILDER_ASSETS is set before starting envtest + if os.Getenv("KUBEBUILDER_ASSETS") == "" { + fmt.Fprintf(os.Stderr, ` + ERROR: KUBEBUILDER_ASSETS environment variable not set + + Benchmarks require envtest binaries (kube-apiserver, etcd). + + To set up: + $ make setup-envtest + + Then export the path shown, or run: + $ export KUBEBUILDER_ASSETS="$(hack/setup-envtest.sh | tail -1 | cut -d'=' -f2 | tr -d '"')" + + See test/benchmark/README.md for details. + +`) + os.Exit(1) + } + + logf.SetLogger(logr.New(logr.Discard().GetSink())) + log.SetLevel(log.ErrorLevel) + if err := hivev1.AddToScheme(scheme.Scheme); err != nil { + fmt.Fprintf(os.Stderr, "failed to add hive scheme: %v\n", err) + os.Exit(1) + } + testEnv = &envtest.Environment{ + CRDDirectoryPaths: hiveCRDPaths(), + CRDs: openshiftCRDs(), + } + var err error + testRESTConfig, err = testEnv.Start() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to start test environment: %v\n", err) + os.Exit(1) + } + seedClient, err = newClient(testRESTConfig) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to create seed client: %v\n", err) + os.Exit(1) + } + }) + return testRESTConfig +} + +// StopEnvTest tears down the shared envtest environment. +func StopEnvTest() { + if testEnv != nil { + if err := testEnv.Stop(); err != nil { + fmt.Fprintf(os.Stderr, "failed to stop test environment: %v\n", err) + } + } +} + +// hiveCRDPaths returns the path to Hive's CRD manifests. 
+func hiveCRDPaths() []string { + out, err := exec.Command("go", "list", "-m", "-f", "{{.Dir}}", "github.com/openshift/hive").Output() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to locate module root: %v\n", err) + os.Exit(1) + } + root := strings.TrimSpace(string(out)) + crdDir := filepath.Join(root, "config", "crds") + if _, err := os.Stat(crdDir); err != nil { + fmt.Fprintf(os.Stderr, "CRD directory not found at %s: %v\n", crdDir, err) + os.Exit(1) + } + return []string{crdDir} +} + +// openshiftCRDs returns minimal CRDs for OpenShift platform types. +func openshiftCRDs() []*apiextensionsv1.CustomResourceDefinition { + return []*apiextensionsv1.CustomResourceDefinition{ + crdForType(&configv1.ClusterVersion{}, apiextensionsv1.ClusterScoped), + crdForType(&configv1.ClusterOperator{}, apiextensionsv1.ClusterScoped), + crdForType(&machinev1beta1.Machine{}, apiextensionsv1.NamespaceScoped), + crdForType(&machinev1beta1.MachineSet{}, apiextensionsv1.NamespaceScoped), + } +} + +// crdForType builds a minimal CRD from a typed empty object. +func crdForType(obj runtime.Object, scope apiextensionsv1.ResourceScope) *apiextensionsv1.CustomResourceDefinition { + gvks, _, err := hivescheme.GetScheme().ObjectKinds(obj) + if err != nil || len(gvks) == 0 { + panic(fmt.Sprintf("crdForType: type not registered in scheme: %T", obj)) + } + gvk := gvks[0] + plural := strings.ToLower(flect.Pluralize(gvk.Kind)) + + return &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: plural + "." 
+ gvk.Group}, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: gvk.Group, + Scope: scope, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: plural, + Singular: strings.ToLower(gvk.Kind), + Kind: gvk.Kind, + ListKind: gvk.Kind + "List", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{{ + Name: gvk.Version, + Served: true, + Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: boolPtr(true), + }, + }, + }}, + }, + } +} + +func boolPtr(b bool) *bool { return &b } diff --git a/test/benchutil/generators.go b/test/benchutil/generators.go new file mode 100644 index 00000000000..379e5fc5bcd --- /dev/null +++ b/test/benchutil/generators.go @@ -0,0 +1,79 @@ +package benchutil + +import ( + "strings" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + hivev1 "github.com/openshift/hive/apis/hive/v1" +) + +// GenerateConfigMap creates a ConfigMap with optional padding payload (0 = minimal). +func GenerateConfigMap(name string, dataSize int) *corev1.ConfigMap { + data := map[string]string{"key": "value"} + if dataSize > 0 { + data["payload"] = strings.Repeat("x", dataSize) + } + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"}, + Data: data, + } +} + +// GenerateSecret creates a Secret with optional padding payload (0 = minimal). +func GenerateSecret(name string, dataSize int) *corev1.Secret { + data := map[string]string{"password": "secret"} + if dataSize > 0 { + data["payload"] = strings.Repeat("x", dataSize) + } + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"}, + StringData: data, + } +} + +// GenerateDeployment creates a single-replica Deployment. 
+func GenerateDeployment(name string) *appsv1.Deployment { + replicas := int32(1) + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"}, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "nginx", + Image: "nginx:1.14.2", + }}, + }, + }, + }, + } +} + +// GenerateServiceAccount creates a ServiceAccount. +func GenerateServiceAccount(name string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"}, + } +} + +// GenerateSyncSet creates a SyncSet. +func GenerateSyncSet(name string) *hivev1.SyncSet { + return &hivev1.SyncSet{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"}, + Spec: hivev1.SyncSetSpec{ + ClusterDeploymentRefs: []corev1.LocalObjectReference{ + {Name: "bench-cluster"}, + }, + }, + } +} diff --git a/test/benchutil/harness.go b/test/benchutil/harness.go new file mode 100644 index 00000000000..fe7b47c81a9 --- /dev/null +++ b/test/benchutil/harness.go @@ -0,0 +1,97 @@ +package benchutil + +import ( + "context" + "fmt" + "testing" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/openshift/hive/pkg/resource" +) + +// Resettable is the counter interface for ControllerHarness. +type Resettable interface { + ResetAll(b *testing.B) + Report(b *testing.B) +} + +// HelperState bundles an RTCounter and a shared Helper for local-only benchmarks. +type HelperState struct { + *RTCounter + Helper resource.Helper +} + +// ControllerHarness is the central benchmark primitive. See README for details. 
+type ControllerHarness[S Resettable] struct { + NewObjects func(b *testing.B, ns string) []client.Object // optional; creates objects to reconcile + Setup func(b *testing.B, env *BenchEnv) S // creates per-benchmark state + // Reconcile runs one iteration. objects is the slice returned by NewObjects (nil if NewObjects is nil). + // When NewObjects returns b.N uniquely-named objects (e.g. via SingleTemplate), use objects[i]. + // When NewObjects returns a fixed set to apply every iteration, iterate over the whole slice and ignore i. + Reconcile func(b *testing.B, state S, objects []client.Object, i int) + SteadyState bool // when true, also run with pre-seeded objects +} + +// Run executes the benchmark. +func (h ControllerHarness[S]) Run(b *testing.B) { + b.Helper() + if h.SteadyState { + b.Run("FirstApply", func(b *testing.B) { h.run(b, false) }) + b.Run("SteadyState", func(b *testing.B) { h.run(b, true) }) + } else { + h.run(b, false) + } +} + +func (h ControllerHarness[S]) run(b *testing.B, seed bool) { + b.Helper() + env := NewBenchEnv(b) + var objects []client.Object + if h.NewObjects != nil { + objects = h.NewObjects(b, env.Namespace) + } + if seed { + for _, obj := range objects { + seedCopy := CopyAndSetNamespace(obj, env.Namespace) + if err := env.SeedClient.Create(context.Background(), seedCopy); err != nil { + b.Fatalf("seed failed: %v", err) + } + } + } + state := h.Setup(b, env) + + b.ReportAllocs() + state.ResetAll(b) + for i := 0; i < b.N; i++ { + h.Reconcile(b, state, objects, i) + } + state.Report(b) +} + +// SetupLocalHelper creates a shared Helper with a single RTCounter. +func SetupLocalHelper(b *testing.B, env *BenchEnv) *HelperState { + rc := TrackRoundTrips(env.Cfg) + return &HelperState{ + RTCounter: rc, + Helper: BenchHelper(b, rc.Cfg, "benchmark"), + } +} + +// SetupRemoteClient creates a BenchRemoteClient with dual counters. 
+func SetupRemoteClient(b *testing.B, env *BenchEnv) *BenchRemoteClient { + return NewBenchRemoteClient(b, env, "benchmark") +} + +// SingleTemplate returns a NewObjects function producing b.N uniquely-named copies. +func SingleTemplate(template client.Object) func(b *testing.B, ns string) []client.Object { + return func(b *testing.B, ns string) []client.Object { + objects := make([]client.Object, b.N) + for i := range objects { + obj := CopyAndSetNamespace(template, ns) + obj.SetName(fmt.Sprintf("%s-%d", template.GetName(), i)) + objects[i] = obj + } + return objects + } +} diff --git a/test/benchutil/helpers.go b/test/benchutil/helpers.go new file mode 100644 index 00000000000..cb6627044c9 --- /dev/null +++ b/test/benchutil/helpers.go @@ -0,0 +1,50 @@ +package benchutil + +import ( + "fmt" + "testing" + + log "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + "github.com/openshift/hive/pkg/resource" +) + +// BenchHelper creates a resource.Helper with suppressed logging. +func BenchHelper(b *testing.B, cfg *rest.Config, controllerName string) resource.Helper { + b.Helper() + logger := log.New() + logger.SetLevel(log.ErrorLevel) + h, err := resource.NewHelper(logger, resource.FromRESTConfig(cfg), resource.WithControllerName(hivev1.ControllerName(controllerName))) + if err != nil { + b.Fatalf("failed to create helper: %v", err) + } + return h +} + +// CopyAndSetNamespace deep-copies a client.Object and sets its namespace. 
+func CopyAndSetNamespace(obj client.Object, ns string) client.Object {
+	if obj == nil {
+		return nil
+	}
+	runtimeCopied := obj.DeepCopyObject()
+	copied, ok := runtimeCopied.(client.Object)
+	if !ok {
+		panic(fmt.Sprintf("CopyAndSetNamespace: DeepCopyObject of %T did not return a client.Object", obj))
+	}
+	copied.SetNamespace(ns)
+	return copied
+}
+
+// MustSerialize serializes a runtime.Object to JSON. Panics on error.
+func MustSerialize(obj runtime.Object) []byte {
+	out, err := resource.Serialize(obj, scheme.Scheme)
+	if err != nil {
+		panic(fmt.Sprintf("MustSerialize: %v", err))
+	}
+	return out
+}
diff --git a/test/benchutil/namespace.go b/test/benchutil/namespace.go
new file mode 100644
index 00000000000..b896f4b2790
--- /dev/null
+++ b/test/benchutil/namespace.go
@@ -0,0 +1,48 @@
+package benchutil
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+)
+
+var (
+	benchNSCounter  atomic.Int64
+	benchClientset  *kubernetes.Clientset
+	benchClientErr  error
+	benchClientOnce sync.Once
+)
+
+// BenchNamespace creates a unique namespace for benchmark isolation.
+// The shared clientset is initialized once; if that initialization fails,
+// the error is reported via b.Fatalf on this and every subsequent call
+// (rather than os.Exit), so testing cleanup and reporting still run.
+func BenchNamespace(b *testing.B, cfg *rest.Config) string {
+	b.Helper()
+	benchClientOnce.Do(func() {
+		cs, err := kubernetes.NewForConfig(cfg)
+		if err != nil {
+			benchClientErr = err
+			return
+		}
+		benchClientset = cs
+	})
+	if benchClientErr != nil {
+		b.Fatalf("failed to create clientset: %v", benchClientErr)
+	}
+	name := fmt.Sprintf("bench-%d", benchNSCounter.Add(1))
+	_, err := benchClientset.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{Name: name},
+	}, metav1.CreateOptions{})
+	if err != nil {
+		b.Fatalf("failed to create namespace %s: %v", name, err)
+	}
+	return name
+}
diff --git a/test/benchutil/roundtrip.go b/test/benchutil/roundtrip.go
new file mode 100644
index 00000000000..e01acec0093
--- /dev/null
+++ b/test/benchutil/roundtrip.go
@@ -0,0 +1,118 @@
+package benchutil
+
+import (
+	"io"
+	"net/http"
+	"sync/atomic"
+	"testing"
+
+	"k8s.io/client-go/rest"
+)
+
+// RTCounter counts HTTP round trips and bytes transferred.
+type RTCounter struct {
+	Cfg           *rest.Config
+	count         atomic.Int64
+	bytesSent     atomic.Int64
+	bytesReceived atomic.Int64
+}
+
+// NewRTCounter creates a standalone counter. Use WrapTransport to inject it.
+func NewRTCounter() *RTCounter {
+	return &RTCounter{}
+}
+
+// TrackRoundTrips returns a config copy that counts round trips and bytes.
+func TrackRoundTrips(cfg *rest.Config) *RTCounter {
+	rc := NewRTCounter()
+	rc.Cfg = rest.CopyConfig(cfg)
+	rc.Cfg.Wrap(rc.WrapTransport)
+	return rc
+}
+
+// ResetAll zeros counters and resets the benchmark timer.
+func (rc *RTCounter) ResetAll(b *testing.B) {
+	rc.reset()
+	b.ResetTimer()
+}
+
+func (rc *RTCounter) RoundTrips() int64    { return rc.count.Load() }
+func (rc *RTCounter) BytesSent() int64     { return rc.bytesSent.Load() }
+func (rc *RTCounter) BytesReceived() int64 { return rc.bytesReceived.Load() }
+
+// Report emits roundtrips/op, B-sent/op, and B-received/op.
+func (rc *RTCounter) Report(b *testing.B) {
+	n := float64(b.N)
+	b.ReportMetric(float64(rc.RoundTrips())/n, "roundtrips/op")
+	b.ReportMetric(float64(rc.BytesSent())/n, "B-sent/op")
+	b.ReportMetric(float64(rc.BytesReceived())/n, "B-received/op")
+}
+
+// ReportAs emits metrics with a prefix (e.g. "remote-roundtrips/op").
+func (rc *RTCounter) ReportAs(b *testing.B, prefix string) {
+	n := float64(b.N)
+	b.ReportMetric(float64(rc.RoundTrips())/n, prefix+"-roundtrips/op")
+	b.ReportMetric(float64(rc.BytesSent())/n, prefix+"-B-sent/op")
+	b.ReportMetric(float64(rc.BytesReceived())/n, prefix+"-B-received/op")
+}
+
+// WrapTransport returns a counting transport wrapper.
+func (rc *RTCounter) WrapTransport(rt http.RoundTripper) http.RoundTripper {
+	return &countingRoundTripper{
+		delegate:      rt,
+		count:         &rc.count,
+		bytesSent:     &rc.bytesSent,
+		bytesReceived: &rc.bytesReceived,
+	}
+}
+
+func (rc *RTCounter) reset() {
+	rc.count.Store(0)
+	rc.bytesSent.Store(0)
+	rc.bytesReceived.Store(0)
+}
+
+// countingRoundTripper wraps a transport to count round trips and bytes.
+type countingRoundTripper struct {
+	delegate      http.RoundTripper
+	count         *atomic.Int64
+	bytesSent     *atomic.Int64
+	bytesReceived *atomic.Int64
+}
+
+func (c *countingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	c.count.Add(1)
+
+	// Count request body bytes. Per the http.RoundTripper contract the
+	// incoming request must not be modified, so wrap the body on a
+	// shallow copy of the request instead of mutating req directly.
+	if req.Body != nil && req.Body != http.NoBody {
+		clone := new(http.Request)
+		*clone = *req
+		clone.Body = &countingReadCloser{ReadCloser: req.Body, count: c.bytesSent}
+		req = clone
+	}
+
+	resp, err := c.delegate.RoundTrip(req)
+	if err != nil {
+		return resp, err
+	}
+
+	// Wrap response body to count bytes as they're read.
+	if resp.Body != nil {
+		resp.Body = &countingReadCloser{ReadCloser: resp.Body, count: c.bytesReceived}
+	}
+
+	return resp, nil
+}
+
+// countingReadCloser wraps an io.ReadCloser to count bytes read.
+type countingReadCloser struct { + io.ReadCloser + count *atomic.Int64 +} + +func (c *countingReadCloser) Read(p []byte) (int, error) { + n, err := c.ReadCloser.Read(p) + c.count.Add(int64(n)) + return n, err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6d6536b95fc..5fc7796c1a0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4250,18 +4250,24 @@ sigs.k8s.io/controller-runtime/pkg/config sigs.k8s.io/controller-runtime/pkg/controller sigs.k8s.io/controller-runtime/pkg/controller/controllerutil sigs.k8s.io/controller-runtime/pkg/conversion +sigs.k8s.io/controller-runtime/pkg/envtest sigs.k8s.io/controller-runtime/pkg/event sigs.k8s.io/controller-runtime/pkg/handler sigs.k8s.io/controller-runtime/pkg/healthz sigs.k8s.io/controller-runtime/pkg/internal/controller sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics sigs.k8s.io/controller-runtime/pkg/internal/field/selector +sigs.k8s.io/controller-runtime/pkg/internal/flock sigs.k8s.io/controller-runtime/pkg/internal/httpserver sigs.k8s.io/controller-runtime/pkg/internal/log sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder sigs.k8s.io/controller-runtime/pkg/internal/source sigs.k8s.io/controller-runtime/pkg/internal/syncs +sigs.k8s.io/controller-runtime/pkg/internal/testing/addr +sigs.k8s.io/controller-runtime/pkg/internal/testing/certs +sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane +sigs.k8s.io/controller-runtime/pkg/internal/testing/process sigs.k8s.io/controller-runtime/pkg/leaderelection sigs.k8s.io/controller-runtime/pkg/log sigs.k8s.io/controller-runtime/pkg/manager @@ -4278,6 +4284,14 @@ sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics sigs.k8s.io/controller-runtime/pkg/webhook/conversion sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics +# sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240927101401-4381fa0aeee4 
+## explicit; go 1.22.0 +sigs.k8s.io/controller-runtime/tools/setup-envtest +sigs.k8s.io/controller-runtime/tools/setup-envtest/env +sigs.k8s.io/controller-runtime/tools/setup-envtest/remote +sigs.k8s.io/controller-runtime/tools/setup-envtest/store +sigs.k8s.io/controller-runtime/tools/setup-envtest/versions +sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows # sigs.k8s.io/controller-tools v0.19.0 ## explicit; go 1.24.0 sigs.k8s.io/controller-tools/cmd/controller-gen diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go new file mode 100644 index 00000000000..5fdd657cd7f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go @@ -0,0 +1,465 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package envtest + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "time" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + k8syaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/retry" + "k8s.io/utils/ptr" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" +) + +// CRDInstallOptions are the options for installing CRDs. +type CRDInstallOptions struct { + // Scheme is used to determine if conversion webhooks should be enabled + // for a particular CRD / object. + // + // Conversion webhooks are going to be enabled if an object in the scheme + // implements Hub and Spoke conversions. + // + // If nil, scheme.Scheme is used. + Scheme *runtime.Scheme + + // Paths is a list of paths to the directories or files containing CRDs + Paths []string + + // CRDs is a list of CRDs to install + CRDs []*apiextensionsv1.CustomResourceDefinition + + // ErrorIfPathMissing will cause an error if a Path does not exist + ErrorIfPathMissing bool + + // MaxTime is the max time to wait + MaxTime time.Duration + + // PollInterval is the interval to check + PollInterval time.Duration + + // CleanUpAfterUse will cause the CRDs listed for installation to be + // uninstalled when terminating the test environment. + // Defaults to false. + CleanUpAfterUse bool + + // WebhookOptions contains the conversion webhook information to install + // on the CRDs. This field is usually inherited by the EnvTest options. 
+ // + // If you're passing this field manually, you need to make sure that + // the CA information and host port is filled in properly. + WebhookOptions WebhookInstallOptions +} + +const ( + defaultPollInterval = 100 * time.Millisecond + defaultMaxWait = 10 * time.Second +) + +// InstallCRDs installs a collection of CRDs into a cluster by reading the crd yaml files from a directory. +func InstallCRDs(config *rest.Config, options CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) { + defaultCRDOptions(&options) + + // Read the CRD yamls into options.CRDs + if err := readCRDFiles(&options); err != nil { + return nil, fmt.Errorf("unable to read CRD files: %w", err) + } + + if err := modifyConversionWebhooks(options.CRDs, options.Scheme, options.WebhookOptions); err != nil { + return nil, err + } + + // Create the CRDs in the apiserver + if err := CreateCRDs(config, options.CRDs); err != nil { + return options.CRDs, fmt.Errorf("unable to create CRD instances: %w", err) + } + + // Wait for the CRDs to appear as Resources in the apiserver + if err := WaitForCRDs(config, options.CRDs, options); err != nil { + return options.CRDs, fmt.Errorf("something went wrong waiting for CRDs to appear as API resources: %w", err) + } + + return options.CRDs, nil +} + +// readCRDFiles reads the directories of CRDs in options.Paths and adds the CRD structs to options.CRDs. +func readCRDFiles(options *CRDInstallOptions) error { + if len(options.Paths) > 0 { + crdList, err := renderCRDs(options) + if err != nil { + return err + } + + options.CRDs = append(options.CRDs, crdList...) + } + return nil +} + +// defaultCRDOptions sets the default values for CRDs. +func defaultCRDOptions(o *CRDInstallOptions) { + if o.Scheme == nil { + o.Scheme = scheme.Scheme + } + if o.MaxTime == 0 { + o.MaxTime = defaultMaxWait + } + if o.PollInterval == 0 { + o.PollInterval = defaultPollInterval + } +} + +// WaitForCRDs waits for the CRDs to appear in discovery. 
+func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition, options CRDInstallOptions) error { + // Add each CRD to a map of GroupVersion to Resource + waitingFor := map[schema.GroupVersion]*sets.Set[string]{} + for _, crd := range crds { + gvs := []schema.GroupVersion{} + for _, version := range crd.Spec.Versions { + if version.Served { + gvs = append(gvs, schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}) + } + } + + for _, gv := range gvs { + log.V(1).Info("adding API in waitlist", "GV", gv) + if _, found := waitingFor[gv]; !found { + // Initialize the set + waitingFor[gv] = &sets.Set[string]{} + } + // Add the Resource + waitingFor[gv].Insert(crd.Spec.Names.Plural) + } + } + + // Poll until all resources are found in discovery + p := &poller{config: config, waitingFor: waitingFor} + return wait.PollUntilContextTimeout(context.TODO(), options.PollInterval, options.MaxTime, true, p.poll) +} + +// poller checks if all the resources have been found in discovery, and returns false if not. +type poller struct { + // config is used to get discovery + config *rest.Config + + // waitingFor is the map of resources keyed by group version that have not yet been found in discovery + waitingFor map[schema.GroupVersion]*sets.Set[string] +} + +// poll checks if all the resources have been found in discovery, and returns false if not. +func (p *poller) poll(ctx context.Context) (done bool, err error) { + // Create a new clientset to avoid any client caching of discovery + cs, err := clientset.NewForConfig(p.config) + if err != nil { + return false, err + } + + allFound := true + for gv, resources := range p.waitingFor { + // All resources found, do nothing + if resources.Len() == 0 { + delete(p.waitingFor, gv) + continue + } + + // Get the Resources for this GroupVersion + // TODO: Maybe the controller-runtime client should be able to do this... 
+ resourceList, err := cs.Discovery().ServerResourcesForGroupVersion(gv.Group + "/" + gv.Version) + if err != nil { + return false, nil //nolint:nilerr + } + + // Remove each found resource from the resources set that we are waiting for + for _, resource := range resourceList.APIResources { + resources.Delete(resource.Name) + } + + // Still waiting on some resources in this group version + if resources.Len() != 0 { + allFound = false + } + } + return allFound, nil +} + +// UninstallCRDs uninstalls a collection of CRDs by reading the crd yaml files from a directory. +func UninstallCRDs(config *rest.Config, options CRDInstallOptions) error { + // Read the CRD yamls into options.CRDs + if err := readCRDFiles(&options); err != nil { + return err + } + + // Delete the CRDs from the apiserver + cs, err := client.New(config, client.Options{}) + if err != nil { + return err + } + + // Uninstall each CRD + for _, crd := range options.CRDs { + crd := crd + log.V(1).Info("uninstalling CRD", "crd", crd.GetName()) + if err := cs.Delete(context.TODO(), crd); err != nil { + // If CRD is not found, we can consider success + if !apierrors.IsNotFound(err) { + return err + } + } + } + + return nil +} + +// CreateCRDs creates the CRDs. 
+func CreateCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition) error { + cs, err := client.New(config, client.Options{}) + if err != nil { + return fmt.Errorf("unable to create client: %w", err) + } + + // Create each CRD + for _, crd := range crds { + crd := crd + log.V(1).Info("installing CRD", "crd", crd.GetName()) + existingCrd := crd.DeepCopy() + err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd) + switch { + case apierrors.IsNotFound(err): + if err := cs.Create(context.TODO(), crd); err != nil { + return fmt.Errorf("unable to create CRD %q: %w", crd.GetName(), err) + } + case err != nil: + return fmt.Errorf("unable to get CRD %q to check if it exists: %w", crd.GetName(), err) + default: + log.V(1).Info("CRD already exists, updating", "crd", crd.GetName()) + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd); err != nil { + return err + } + crd.SetResourceVersion(existingCrd.GetResourceVersion()) + return cs.Update(context.TODO(), crd) + }); err != nil { + return err + } + } + } + return nil +} + +// renderCRDs iterate through options.Paths and extract all CRD files. 
+func renderCRDs(options *CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) { + type GVKN struct { + GVK schema.GroupVersionKind + Name string + } + + crds := map[GVKN]*apiextensionsv1.CustomResourceDefinition{} + + for _, path := range options.Paths { + var ( + err error + info os.FileInfo + files []string + filePath = path + ) + + // Return the error if ErrorIfPathMissing exists + if info, err = os.Stat(path); os.IsNotExist(err) { + if options.ErrorIfPathMissing { + return nil, err + } + continue + } + + if !info.IsDir() { + filePath, files = filepath.Dir(path), []string{info.Name()} + } else { + entries, err := os.ReadDir(path) + if err != nil { + return nil, err + } + for _, e := range entries { + files = append(files, e.Name()) + } + } + + log.V(1).Info("reading CRDs from path", "path", path) + crdList, err := readCRDs(filePath, files) + if err != nil { + return nil, err + } + + for i, crd := range crdList { + gvkn := GVKN{GVK: crd.GroupVersionKind(), Name: crd.GetName()} + if _, found := crds[gvkn]; found { + // Currently, we only print a log when there are duplicates. We may want to error out if that makes more sense. + log.Info("there are more than one CRD definitions with the same ", "GVKN", gvkn) + } + // We always use the CRD definition that we found last. + crds[gvkn] = crdList[i] + } + } + + // Converting map to a list to return + res := []*apiextensionsv1.CustomResourceDefinition{} + for _, obj := range crds { + res = append(res, obj) + } + return res, nil +} + +// modifyConversionWebhooks takes all the registered CustomResourceDefinitions and applies modifications +// to conditionally enable webhooks if the type is registered within the scheme. +func modifyConversionWebhooks(crds []*apiextensionsv1.CustomResourceDefinition, scheme *runtime.Scheme, webhookOptions WebhookInstallOptions) error { + if len(webhookOptions.LocalServingCAData) == 0 { + return nil + } + + // Determine all registered convertible types. 
+ convertibles := map[schema.GroupKind]struct{}{} + for gvk := range scheme.AllKnownTypes() { + obj, err := scheme.New(gvk) + if err != nil { + return err + } + if ok, err := conversion.IsConvertible(scheme, obj); ok && err == nil { + convertibles[gvk.GroupKind()] = struct{}{} + } + } + + // generate host port. + hostPort, err := webhookOptions.generateHostPort() + if err != nil { + return err + } + url := ptr.To(fmt.Sprintf("https://%s/convert", hostPort)) + + for i := range crds { + // Continue if we're preserving unknown fields. + if crds[i].Spec.PreserveUnknownFields { + continue + } + if !webhookOptions.IgnoreSchemeConvertible { + // Continue if the GroupKind isn't registered as being convertible, + // and remove any existing conversion webhooks if they exist. + // This is to prevent the CRD from being rejected by the apiserver, usually + // manifests that are generated by controller-gen will have a conversion + // webhook set, but we don't want to enable it if the type isn't registered. + if _, ok := convertibles[schema.GroupKind{ + Group: crds[i].Spec.Group, + Kind: crds[i].Spec.Names.Kind, + }]; !ok { + crds[i].Spec.Conversion = nil + continue + } + } + if crds[i].Spec.Conversion == nil { + crds[i].Spec.Conversion = &apiextensionsv1.CustomResourceConversion{ + Webhook: &apiextensionsv1.WebhookConversion{}, + } + } + crds[i].Spec.Conversion.Strategy = apiextensionsv1.WebhookConverter + crds[i].Spec.Conversion.Webhook.ConversionReviewVersions = []string{"v1", "v1beta1"} + crds[i].Spec.Conversion.Webhook.ClientConfig = &apiextensionsv1.WebhookClientConfig{ + Service: nil, + URL: url, + CABundle: webhookOptions.LocalServingCAData, + } + } + + return nil +} + +// readCRDs reads the CRDs from files and Unmarshals them into structs. 
+func readCRDs(basePath string, files []string) ([]*apiextensionsv1.CustomResourceDefinition, error) { + var crds []*apiextensionsv1.CustomResourceDefinition + + // White list the file extensions that may contain CRDs + crdExts := sets.NewString(".json", ".yaml", ".yml") + + for _, file := range files { + // Only parse allowlisted file types + if !crdExts.Has(filepath.Ext(file)) { + continue + } + + // Unmarshal CRDs from file into structs + docs, err := readDocuments(filepath.Join(basePath, file)) + if err != nil { + return nil, err + } + + for _, doc := range docs { + crd := &apiextensionsv1.CustomResourceDefinition{} + if err = yaml.Unmarshal(doc, crd); err != nil { + return nil, err + } + + if crd.Kind != "CustomResourceDefinition" || crd.Spec.Names.Kind == "" || crd.Spec.Group == "" { + continue + } + crds = append(crds, crd) + } + + log.V(1).Info("read CRDs from file", "file", file) + } + return crds, nil +} + +// readDocuments reads documents from file. +func readDocuments(fp string) ([][]byte, error) { + b, err := os.ReadFile(fp) + if err != nil { + return nil, err + } + + docs := [][]byte{} + reader := k8syaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(b))) + for { + // Read document + doc, err := reader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + + return nil, err + } + + docs = append(docs, doc) + } + + return docs, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go new file mode 100644 index 00000000000..412e794cc8c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envtest provides libraries for integration testing by starting a local control plane +// +// Control plane binaries (etcd and kube-apiserver) are loaded by default from +// /usr/local/kubebuilder/bin. This can be overridden by setting the +// KUBEBUILDER_ASSETS environment variable, or by directly creating a +// ControlPlane for the Environment to use. +// +// Environment can also be configured to work with an existing cluster, and +// simply load CRDs and provide client configuration. +package envtest diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go new file mode 100644 index 00000000000..d3b52017d23 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go @@ -0,0 +1,69 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package envtest + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/client-go/kubernetes/scheme" +) + +var ( + crdScheme = scheme.Scheme +) + +// init is required to correctly initialize the crdScheme package variable. +func init() { + _ = apiextensionsv1.AddToScheme(crdScheme) +} + +// mergePaths merges two string slices containing paths. +// This function makes no guarantees about order of the merged slice. +func mergePaths(s1, s2 []string) []string { + m := make(map[string]struct{}) + for _, s := range s1 { + m[s] = struct{}{} + } + for _, s := range s2 { + m[s] = struct{}{} + } + merged := make([]string, len(m)) + i := 0 + for key := range m { + merged[i] = key + i++ + } + return merged +} + +// mergeCRDs merges two CRD slices using their names. +// This function makes no guarantees about order of the merged slice. +func mergeCRDs(s1, s2 []*apiextensionsv1.CustomResourceDefinition) []*apiextensionsv1.CustomResourceDefinition { + m := make(map[string]*apiextensionsv1.CustomResourceDefinition) + for _, obj := range s1 { + m[obj.GetName()] = obj + } + for _, obj := range s2 { + m[obj.GetName()] = obj + } + merged := make([]*apiextensionsv1.CustomResourceDefinition, len(m)) + i := 0 + for _, obj := range m { + merged[i] = obj.DeepCopy() + i++ + } + return merged +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go new file mode 100644 index 00000000000..85436576456 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go @@ -0,0 +1,387 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + + "sigs.k8s.io/controller-runtime/pkg/client/config" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +var log = logf.RuntimeLog.WithName("test-env") + +/* +It's possible to override some defaults, by setting the following environment variables: +* USE_EXISTING_CLUSTER (boolean): if set to true, envtest will use an existing cluster +* TEST_ASSET_KUBE_APISERVER (string): path to the api-server binary to use +* TEST_ASSET_ETCD (string): path to the etcd binary to use +* TEST_ASSET_KUBECTL (string): path to the kubectl binary to use +* KUBEBUILDER_ASSETS (string): directory containing the binaries to use (api-server, etcd and kubectl). Defaults to /usr/local/kubebuilder/bin. +* KUBEBUILDER_CONTROLPLANE_START_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. +* KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. 
+* KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT (boolean): if set to true, the control plane's stdout and stderr are attached to os.Stdout and os.Stderr +*/ +const ( + envUseExistingCluster = "USE_EXISTING_CLUSTER" + envStartTimeout = "KUBEBUILDER_CONTROLPLANE_START_TIMEOUT" + envStopTimeout = "KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT" + envAttachOutput = "KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT" + StartTimeout = 60 + StopTimeout = 60 + + defaultKubebuilderControlPlaneStartTimeout = 20 * time.Second + defaultKubebuilderControlPlaneStopTimeout = 20 * time.Second +) + +// internal types we expose as part of our public API. +type ( + // ControlPlane is the re-exported ControlPlane type from the internal testing package. + ControlPlane = controlplane.ControlPlane + + // APIServer is the re-exported APIServer from the internal testing package. + APIServer = controlplane.APIServer + + // Etcd is the re-exported Etcd from the internal testing package. + Etcd = controlplane.Etcd + + // User represents a Kubernetes user to provision for auth purposes. + User = controlplane.User + + // AuthenticatedUser represets a Kubernetes user that's been provisioned. + AuthenticatedUser = controlplane.AuthenticatedUser + + // ListenAddr indicates the address and port that the API server should listen on. + ListenAddr = process.ListenAddr + + // SecureServing contains details describing how the API server should serve + // its secure endpoint. + SecureServing = controlplane.SecureServing + + // Authn is an authentication method that can be used with the control plane to + // provision users. + Authn = controlplane.Authn + + // Arguments allows configuring a process's flags. + Arguments = process.Arguments + + // Arg is a single flag with one or more values. + Arg = process.Arg +) + +var ( + // EmptyArguments constructs a new set of flags with nothing set. 
+ // + // This is mostly useful for testing helper methods -- you'll want to call + // Configure on the APIServer (or etcd) to configure their arguments. + EmptyArguments = process.EmptyArguments +) + +// Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and +// install extension APIs. +type Environment struct { + // ControlPlane is the ControlPlane including the apiserver and etcd + ControlPlane controlplane.ControlPlane + + // Scheme is used to determine if conversion webhooks should be enabled + // for a particular CRD / object. + // + // Conversion webhooks are going to be enabled if an object in the scheme + // implements Hub and Spoke conversions. + // + // If nil, scheme.Scheme is used. + Scheme *runtime.Scheme + + // Config can be used to talk to the apiserver. It's automatically + // populated if not set using the standard controller-runtime config + // loading. + Config *rest.Config + + // CRDInstallOptions are the options for installing CRDs. + CRDInstallOptions CRDInstallOptions + + // WebhookInstallOptions are the options for installing webhooks. + WebhookInstallOptions WebhookInstallOptions + + // ErrorIfCRDPathMissing provides an interface for the underlying + // CRDInstallOptions.ErrorIfPathMissing. It prevents silent failures + // for missing CRD paths. + ErrorIfCRDPathMissing bool + + // CRDs is a list of CRDs to install. + // If both this field and CRDs field in CRDInstallOptions are specified, the + // values are merged. + CRDs []*apiextensionsv1.CustomResourceDefinition + + // CRDDirectoryPaths is a list of paths containing CRD yaml or json configs. + // If both this field and Paths field in CRDInstallOptions are specified, the + // values are merged. + CRDDirectoryPaths []string + + // BinaryAssetsDirectory is the path where the binaries required for the envtest are + // located in the local environment. This field can be overridden by setting KUBEBUILDER_ASSETS. 
+ BinaryAssetsDirectory string + + // UseExistingCluster indicates that this environments should use an + // existing kubeconfig, instead of trying to stand up a new control plane. + // This is useful in cases that need aggregated API servers and the like. + UseExistingCluster *bool + + // ControlPlaneStartTimeout is the maximum duration each controlplane component + // may take to start. It defaults to the KUBEBUILDER_CONTROLPLANE_START_TIMEOUT + // environment variable or 20 seconds if unspecified + ControlPlaneStartTimeout time.Duration + + // ControlPlaneStopTimeout is the maximum duration each controlplane component + // may take to stop. It defaults to the KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT + // environment variable or 20 seconds if unspecified + ControlPlaneStopTimeout time.Duration + + // AttachControlPlaneOutput indicates if control plane output will be attached to os.Stdout and os.Stderr. + // Enable this to get more visibility of the testing control plane. + // It respect KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT environment variable. + AttachControlPlaneOutput bool +} + +// Stop stops a running server. +// Previously installed CRDs, as listed in CRDInstallOptions.CRDs, will be uninstalled +// if CRDInstallOptions.CleanUpAfterUse are set to true. +func (te *Environment) Stop() error { + if te.CRDInstallOptions.CleanUpAfterUse { + if err := UninstallCRDs(te.Config, te.CRDInstallOptions); err != nil { + return err + } + } + + if err := te.WebhookInstallOptions.Cleanup(); err != nil { + return err + } + + if te.useExistingCluster() { + return nil + } + + return te.ControlPlane.Stop() +} + +// Start starts a local Kubernetes server and updates te.ApiserverPort with the port it is listening on. +func (te *Environment) Start() (*rest.Config, error) { + if te.useExistingCluster() { + log.V(1).Info("using existing cluster") + if te.Config == nil { + // we want to allow people to pass in their own config, so + // only load a config if it hasn't already been set. 
+ log.V(1).Info("automatically acquiring client configuration") + + var err error + te.Config, err = config.GetConfig() + if err != nil { + return nil, fmt.Errorf("unable to get configuration for existing cluster: %w", err) + } + } + } else { + apiServer := te.ControlPlane.GetAPIServer() + + if te.ControlPlane.Etcd == nil { + te.ControlPlane.Etcd = &controlplane.Etcd{} + } + + if os.Getenv(envAttachOutput) == "true" { + te.AttachControlPlaneOutput = true + } + if te.AttachControlPlaneOutput { + if apiServer.Out == nil { + apiServer.Out = os.Stdout + } + if apiServer.Err == nil { + apiServer.Err = os.Stderr + } + if te.ControlPlane.Etcd.Out == nil { + te.ControlPlane.Etcd.Out = os.Stdout + } + if te.ControlPlane.Etcd.Err == nil { + te.ControlPlane.Etcd.Err = os.Stderr + } + } + + apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory) + te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory) + te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory) + + if err := te.defaultTimeouts(); err != nil { + return nil, fmt.Errorf("failed to default controlplane timeouts: %w", err) + } + te.ControlPlane.Etcd.StartTimeout = te.ControlPlaneStartTimeout + te.ControlPlane.Etcd.StopTimeout = te.ControlPlaneStopTimeout + apiServer.StartTimeout = te.ControlPlaneStartTimeout + apiServer.StopTimeout = te.ControlPlaneStopTimeout + + log.V(1).Info("starting control plane") + if err := te.startControlPlane(); err != nil { + return nil, fmt.Errorf("unable to start control plane itself: %w", err) + } + + // Create the *rest.Config for creating new clients + baseConfig := &rest.Config{ + // gotta go fast during tests -- we don't really care about overwhelming our test API server + QPS: 1000.0, + Burst: 2000.0, + } + + adminInfo := User{Name: "admin", Groups: []string{"system:masters"}} + adminUser, err := te.ControlPlane.AddUser(adminInfo, baseConfig) + if err != nil { + return te.Config, 
fmt.Errorf("unable to provision admin user: %w", err) + } + te.Config = adminUser.Config() + } + + // Set the default scheme if nil. + if te.Scheme == nil { + te.Scheme = scheme.Scheme + } + + // If we are bringing etcd up for the first time, it can take some time for the + // default namespace to actually be created and seen as available to the apiserver + if err := te.waitForDefaultNamespace(te.Config); err != nil { + return nil, fmt.Errorf("default namespace didn't register within deadline: %w", err) + } + + // Call PrepWithoutInstalling to setup certificates first + // and have them available to patch CRD conversion webhook as well. + if err := te.WebhookInstallOptions.PrepWithoutInstalling(); err != nil { + return nil, err + } + + log.V(1).Info("installing CRDs") + if te.CRDInstallOptions.Scheme == nil { + te.CRDInstallOptions.Scheme = te.Scheme + } + te.CRDInstallOptions.CRDs = mergeCRDs(te.CRDInstallOptions.CRDs, te.CRDs) + te.CRDInstallOptions.Paths = mergePaths(te.CRDInstallOptions.Paths, te.CRDDirectoryPaths) + te.CRDInstallOptions.ErrorIfPathMissing = te.ErrorIfCRDPathMissing + te.CRDInstallOptions.WebhookOptions = te.WebhookInstallOptions + crds, err := InstallCRDs(te.Config, te.CRDInstallOptions) + if err != nil { + return te.Config, fmt.Errorf("unable to install CRDs onto control plane: %w", err) + } + te.CRDs = crds + + log.V(1).Info("installing webhooks") + if err := te.WebhookInstallOptions.Install(te.Config); err != nil { + return nil, fmt.Errorf("unable to install webhooks onto control plane: %w", err) + } + return te.Config, nil +} + +// AddUser provisions a new user for connecting to this Environment. The user will +// have the specified name & belong to the specified groups. +// +// If you specify a "base" config, the returned REST Config will contain those +// settings as well as any required by the authentication method. You can use +// this to easily specify options like QPS. 
+//
+// This is effectively a convenience alias for ControlPlane.AddUser -- see that
+// for more low-level details.
+func (te *Environment) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) {
+	return te.ControlPlane.AddUser(user, baseConfig)
+}
+
+func (te *Environment) startControlPlane() error {
+	numTries, maxRetries := 0, 5
+	var err error
+	for ; numTries < maxRetries; numTries++ {
+		// Start the control plane - retry if it fails
+		err = te.ControlPlane.Start()
+		if err == nil {
+			break
+		}
+		log.Error(err, "unable to start the controlplane", "tries", numTries)
+	}
+	if numTries == maxRetries {
+		return fmt.Errorf("failed to start the controlplane. retried %d times: %w", numTries, err)
+	}
+	return nil
+}
+
+func (te *Environment) waitForDefaultNamespace(config *rest.Config) error {
+	cs, err := client.New(config, client.Options{})
+	if err != nil {
+		return fmt.Errorf("unable to create client: %w", err)
+	}
+	// It shouldn't take longer than 5s for the default namespace to be brought up in etcd
+	return wait.PollUntilContextTimeout(context.TODO(), time.Millisecond*50, time.Second*5, true, func(ctx context.Context) (bool, error) {
+		if err = cs.Get(ctx, types.NamespacedName{Name: "default"}, &corev1.Namespace{}); err != nil {
+			return false, nil //nolint:nilerr
+		}
+		return true, nil
+	})
+}
+
+func (te *Environment) defaultTimeouts() error {
+	var err error
+	if te.ControlPlaneStartTimeout == 0 {
+		if envVal := os.Getenv(envStartTimeout); envVal != "" {
+			te.ControlPlaneStartTimeout, err = time.ParseDuration(envVal)
+			if err != nil {
+				return err
+			}
+		} else {
+			te.ControlPlaneStartTimeout = defaultKubebuilderControlPlaneStartTimeout
+		}
+	}
+
+	if te.ControlPlaneStopTimeout == 0 {
+		if envVal := os.Getenv(envStopTimeout); envVal != "" {
+			te.ControlPlaneStopTimeout, err = time.ParseDuration(envVal)
+			if err != nil {
+				return err
+			}
+		} else {
+			te.ControlPlaneStopTimeout = defaultKubebuilderControlPlaneStopTimeout
+		}
+	}
+	return
nil +} + +func (te *Environment) useExistingCluster() bool { + if te.UseExistingCluster == nil { + return strings.ToLower(os.Getenv(envUseExistingCluster)) == "true" + } + return *te.UseExistingCluster +} + +// DefaultKubeAPIServerFlags exposes the default args for the APIServer so that +// you can use those to append your own additional arguments. +// +// Deprecated: use APIServer.Configure() instead. +var DefaultKubeAPIServerFlags = controlplane.APIServerDefaultArgs //nolint:staticcheck diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go new file mode 100644 index 00000000000..e4e54e472c7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go @@ -0,0 +1,451 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package envtest + +import ( + "context" + "fmt" + "net" + "os" + "path/filepath" + "time" + + admissionv1 "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +// WebhookInstallOptions are the options for installing mutating or validating webhooks. +type WebhookInstallOptions struct { + // Paths is a list of paths to the directories or files containing the mutating or validating webhooks yaml or json configs. + Paths []string + + // MutatingWebhooks is a list of MutatingWebhookConfigurations to install + MutatingWebhooks []*admissionv1.MutatingWebhookConfiguration + + // ValidatingWebhooks is a list of ValidatingWebhookConfigurations to install + ValidatingWebhooks []*admissionv1.ValidatingWebhookConfiguration + + // IgnoreSchemeConvertible, will modify any CRD conversion webhook to use the local serving host and port, + // bypassing the need to have the types registered in the Scheme. This is useful for testing CRD conversion webhooks + // with unregistered or unstructured types. + IgnoreSchemeConvertible bool + + // IgnoreErrorIfPathMissing will ignore an error if a DirectoryPath does not exist when set to true + IgnoreErrorIfPathMissing bool + + // LocalServingHost is the host for serving webhooks on. + // it will be automatically populated + LocalServingHost string + + // LocalServingPort is the allocated port for serving webhooks on. 
+ // it will be automatically populated by a random available local port + LocalServingPort int + + // LocalServingCertDir is the allocated directory for serving certificates. + // it will be automatically populated by the local temp dir + LocalServingCertDir string + + // CAData is the CA that can be used to trust the serving certificates in LocalServingCertDir. + LocalServingCAData []byte + + // LocalServingHostExternalName is the hostname to use to reach the webhook server. + LocalServingHostExternalName string + + // MaxTime is the max time to wait + MaxTime time.Duration + + // PollInterval is the interval to check + PollInterval time.Duration +} + +// ModifyWebhookDefinitions modifies webhook definitions by: +// - applying CABundle based on the provided tinyca +// - if webhook client config uses service spec, it's removed and replaced with direct url. +func (o *WebhookInstallOptions) ModifyWebhookDefinitions() error { + caData := o.LocalServingCAData + + // generate host port. + hostPort, err := o.generateHostPort() + if err != nil { + return err + } + + for i := range o.MutatingWebhooks { + for j := range o.MutatingWebhooks[i].Webhooks { + updateClientConfig(&o.MutatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData) + } + } + + for i := range o.ValidatingWebhooks { + for j := range o.ValidatingWebhooks[i].Webhooks { + updateClientConfig(&o.ValidatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData) + } + } + return nil +} + +func updateClientConfig(cc *admissionv1.WebhookClientConfig, hostPort string, caData []byte) { + cc.CABundle = caData + if cc.Service != nil && cc.Service.Path != nil { + url := fmt.Sprintf("https://%s/%s", hostPort, *cc.Service.Path) + cc.URL = &url + cc.Service = nil + } +} + +func (o *WebhookInstallOptions) generateHostPort() (string, error) { + if o.LocalServingPort == 0 { + port, host, err := addr.Suggest(o.LocalServingHost) + if err != nil { + return "", fmt.Errorf("unable to grab random port for serving webhooks on: 
%w", err)
+		}
+		o.LocalServingPort = port
+		o.LocalServingHost = host
+	}
+	host := o.LocalServingHostExternalName
+	if host == "" {
+		host = o.LocalServingHost
+	}
+	return net.JoinHostPort(host, fmt.Sprintf("%d", o.LocalServingPort)), nil
+}
+
+// PrepWithoutInstalling does the setup parts of Install (populating host-port,
+// setting up CAs, etc), without actually trying to do anything with webhook
+// definitions. This is largely useful for internal testing of
+// controller-runtime, where we need a random host-port & caData for webhook
+// tests, but may be useful in similar scenarios.
+func (o *WebhookInstallOptions) PrepWithoutInstalling() error {
+	if err := o.setupCA(); err != nil {
+		return err
+	}
+
+	if err := parseWebhook(o); err != nil {
+		return err
+	}
+
+	return o.ModifyWebhookDefinitions()
+}
+
+// Install installs specified webhooks to the API server.
+func (o *WebhookInstallOptions) Install(config *rest.Config) error {
+	defaultWebhookOptions(o)
+
+	if len(o.LocalServingCAData) == 0 {
+		if err := o.PrepWithoutInstalling(); err != nil {
+			return err
+		}
+	}
+
+	if err := createWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks); err != nil {
+		return err
+	}
+
+	return WaitForWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks, *o)
+}
+
+// Cleanup cleans up cert directories.
+func (o *WebhookInstallOptions) Cleanup() error {
+	if o.LocalServingCertDir != "" {
+		return os.RemoveAll(o.LocalServingCertDir)
+	}
+	return nil
+}
+
+// defaultWebhookOptions sets the default values for Webhooks.
+func defaultWebhookOptions(o *WebhookInstallOptions) {
+	if o.MaxTime == 0 {
+		o.MaxTime = defaultMaxWait
+	}
+	if o.PollInterval == 0 {
+		o.PollInterval = defaultPollInterval
+	}
+}
+
+// WaitForWebhooks waits for the Webhooks to be available through API server.
+func WaitForWebhooks(config *rest.Config, + mutatingWebhooks []*admissionv1.MutatingWebhookConfiguration, + validatingWebhooks []*admissionv1.ValidatingWebhookConfiguration, + options WebhookInstallOptions, +) error { + waitingFor := map[schema.GroupVersionKind]*sets.Set[string]{} + + for _, hook := range mutatingWebhooks { + h := hook + gvk, err := apiutil.GVKForObject(h, scheme.Scheme) + if err != nil { + return fmt.Errorf("unable to get gvk for MutatingWebhookConfiguration %s: %w", hook.GetName(), err) + } + + if _, ok := waitingFor[gvk]; !ok { + waitingFor[gvk] = &sets.Set[string]{} + } + waitingFor[gvk].Insert(h.GetName()) + } + + for _, hook := range validatingWebhooks { + h := hook + gvk, err := apiutil.GVKForObject(h, scheme.Scheme) + if err != nil { + return fmt.Errorf("unable to get gvk for ValidatingWebhookConfiguration %s: %w", hook.GetName(), err) + } + + if _, ok := waitingFor[gvk]; !ok { + waitingFor[gvk] = &sets.Set[string]{} + } + waitingFor[gvk].Insert(hook.GetName()) + } + + // Poll until all resources are found in discovery + p := &webhookPoller{config: config, waitingFor: waitingFor} + return wait.PollUntilContextTimeout(context.TODO(), options.PollInterval, options.MaxTime, true, p.poll) +} + +// poller checks if all the resources have been found in discovery, and returns false if not. +type webhookPoller struct { + // config is used to get discovery + config *rest.Config + + // waitingFor is the map of resources keyed by group version that have not yet been found in discovery + waitingFor map[schema.GroupVersionKind]*sets.Set[string] +} + +// poll checks if all the resources have been found in discovery, and returns false if not. 
+func (p *webhookPoller) poll(ctx context.Context) (done bool, err error) { + // Create a new clientset to avoid any client caching of discovery + c, err := client.New(p.config, client.Options{}) + if err != nil { + return false, err + } + + allFound := true + for gvk, names := range p.waitingFor { + if names.Len() == 0 { + delete(p.waitingFor, gvk) + continue + } + for _, name := range names.UnsortedList() { + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(gvk) + err := c.Get(context.Background(), client.ObjectKey{ + Namespace: "", + Name: name, + }, obj) + + if err == nil { + names.Delete(name) + } + + if apierrors.IsNotFound(err) { + allFound = false + } + if err != nil { + return false, err + } + } + } + return allFound, nil +} + +// setupCA creates CA for testing and writes them to disk. +func (o *WebhookInstallOptions) setupCA() error { + hookCA, err := certs.NewTinyCA() + if err != nil { + return fmt.Errorf("unable to set up webhook CA: %w", err) + } + + names := []string{"localhost", o.LocalServingHost, o.LocalServingHostExternalName} + hookCert, err := hookCA.NewServingCert(names...) 
+ if err != nil { + return fmt.Errorf("unable to set up webhook serving certs: %w", err) + } + + localServingCertsDir, err := os.MkdirTemp("", "envtest-serving-certs-") + o.LocalServingCertDir = localServingCertsDir + if err != nil { + return fmt.Errorf("unable to create directory for webhook serving certs: %w", err) + } + + certData, keyData, err := hookCert.AsBytes() + if err != nil { + return fmt.Errorf("unable to marshal webhook serving certs: %w", err) + } + + if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.crt"), certData, 0640); err != nil { //nolint:gosec + return fmt.Errorf("unable to write webhook serving cert to disk: %w", err) + } + if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.key"), keyData, 0640); err != nil { //nolint:gosec + return fmt.Errorf("unable to write webhook serving key to disk: %w", err) + } + + o.LocalServingCAData = certData + return err +} + +func createWebhooks(config *rest.Config, mutHooks []*admissionv1.MutatingWebhookConfiguration, valHooks []*admissionv1.ValidatingWebhookConfiguration) error { + cs, err := client.New(config, client.Options{}) + if err != nil { + return err + } + + // Create each webhook + for _, hook := range mutHooks { + hook := hook + log.V(1).Info("installing mutating webhook", "webhook", hook.GetName()) + if err := ensureCreated(cs, hook); err != nil { + return err + } + } + for _, hook := range valHooks { + hook := hook + log.V(1).Info("installing validating webhook", "webhook", hook.GetName()) + if err := ensureCreated(cs, hook); err != nil { + return err + } + } + return nil +} + +// ensureCreated creates or update object if already exists in the cluster. 
+func ensureCreated(cs client.Client, obj client.Object) error { + existing := obj.DeepCopyObject().(client.Object) + err := cs.Get(context.Background(), client.ObjectKey{Name: obj.GetName()}, existing) + switch { + case apierrors.IsNotFound(err): + if err := cs.Create(context.Background(), obj); err != nil { + return err + } + case err != nil: + return err + default: + log.V(1).Info("Webhook configuration already exists, updating", "webhook", obj.GetName()) + obj.SetResourceVersion(existing.GetResourceVersion()) + if err := cs.Update(context.Background(), obj); err != nil { + return err + } + } + return nil +} + +// parseWebhook reads the directories or files of Webhooks in options.Paths and adds the Webhook structs to options. +func parseWebhook(options *WebhookInstallOptions) error { + if len(options.Paths) > 0 { + for _, path := range options.Paths { + _, err := os.Stat(path) + if options.IgnoreErrorIfPathMissing && os.IsNotExist(err) { + continue // skip this path + } + if !options.IgnoreErrorIfPathMissing && os.IsNotExist(err) { + return err // treat missing path as error + } + mutHooks, valHooks, err := readWebhooks(path) + if err != nil { + return err + } + options.MutatingWebhooks = append(options.MutatingWebhooks, mutHooks...) + options.ValidatingWebhooks = append(options.ValidatingWebhooks, valHooks...) + } + } + return nil +} + +// readWebhooks reads the Webhooks from files and Unmarshals them into structs +// returns slice of mutating and validating webhook configurations. 
+func readWebhooks(path string) ([]*admissionv1.MutatingWebhookConfiguration, []*admissionv1.ValidatingWebhookConfiguration, error) { + // Get the webhook files + var files []string + var err error + log.V(1).Info("reading Webhooks from path", "path", path) + info, err := os.Stat(path) + if err != nil { + return nil, nil, err + } + if !info.IsDir() { + path, files = filepath.Dir(path), []string{info.Name()} + } else { + entries, err := os.ReadDir(path) + if err != nil { + return nil, nil, err + } + for _, e := range entries { + files = append(files, e.Name()) + } + } + + // file extensions that may contain Webhooks + resourceExtensions := sets.NewString(".json", ".yaml", ".yml") + + var mutHooks []*admissionv1.MutatingWebhookConfiguration + var valHooks []*admissionv1.ValidatingWebhookConfiguration + for _, file := range files { + // Only parse allowlisted file types + if !resourceExtensions.Has(filepath.Ext(file)) { + continue + } + + // Unmarshal Webhooks from file into structs + docs, err := readDocuments(filepath.Join(path, file)) + if err != nil { + return nil, nil, err + } + + for _, doc := range docs { + var generic metav1.PartialObjectMetadata + if err = yaml.Unmarshal(doc, &generic); err != nil { + return nil, nil, err + } + + const ( + admissionregv1 = "admissionregistration.k8s.io/v1" + ) + switch { + case generic.Kind == "MutatingWebhookConfiguration": + if generic.APIVersion != admissionregv1 { + return nil, nil, fmt.Errorf("only v1 is supported right now for MutatingWebhookConfiguration (name: %s)", generic.Name) + } + hook := &admissionv1.MutatingWebhookConfiguration{} + if err := yaml.Unmarshal(doc, hook); err != nil { + return nil, nil, err + } + mutHooks = append(mutHooks, hook) + case generic.Kind == "ValidatingWebhookConfiguration": + if generic.APIVersion != admissionregv1 { + return nil, nil, fmt.Errorf("only v1 is supported right now for ValidatingWebhookConfiguration (name: %s)", generic.Name) + } + hook := 
&admissionv1.ValidatingWebhookConfiguration{} + if err := yaml.Unmarshal(doc, hook); err != nil { + return nil, nil, err + } + valHooks = append(valHooks, hook) + default: + continue + } + } + + log.V(1).Info("read webhooks from file", "file", file) + } + return mutHooks, valHooks, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go new file mode 100644 index 00000000000..11e39823ede --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package flock is copied from k8s.io/kubernetes/pkg/util/flock to avoid +// importing k8s.io/kubernetes as a dependency. +// +// Provides file locking functionalities on unix systems. +package flock diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go new file mode 100644 index 00000000000..ee7a4343722 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go @@ -0,0 +1,24 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +import "errors" + +var ( + // ErrAlreadyLocked is returned when the file is already locked. + ErrAlreadyLocked = errors.New("the file is already locked") +) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go new file mode 100644 index 00000000000..069a5b3a2cb --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go @@ -0,0 +1,24 @@ +// +build !linux,!darwin,!freebsd,!openbsd,!netbsd,!dragonfly + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +// Acquire is not implemented on non-unix systems. 
+func Acquire(path string) error { + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go new file mode 100644 index 00000000000..71ec576df23 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go @@ -0,0 +1,48 @@ +//go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly +// +build linux darwin freebsd openbsd netbsd dragonfly + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +import ( + "errors" + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +// Acquire acquires a lock on a file for the duration of the process. This method +// is reentrant. +func Acquire(path string) error { + fd, err := unix.Open(path, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0600) + if err != nil { + if errors.Is(err, os.ErrExist) { + return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked) + } + return err + } + + // We don't need to close the fd since we should hold + // it until the process exits. + err = unix.Flock(fd, unix.LOCK_NB|unix.LOCK_EX) + if errors.Is(err, unix.EWOULDBLOCK) { // This condition requires LOCK_NB. 
+ return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked) + } + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go new file mode 100644 index 00000000000..ffa33a88616 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go @@ -0,0 +1,142 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package addr + +import ( + "errors" + "fmt" + "io/fs" + "net" + "os" + "path/filepath" + "strings" + "time" + + "sigs.k8s.io/controller-runtime/pkg/internal/flock" +) + +// TODO(directxman12): interface / release functionality for external port managers + +const ( + portReserveTime = 2 * time.Minute + portConflictRetry = 100 + portFilePrefix = "port-" +) + +var ( + cacheDir string +) + +func init() { + baseDir, err := os.UserCacheDir() + if err == nil { + cacheDir = filepath.Join(baseDir, "kubebuilder-envtest") + err = os.MkdirAll(cacheDir, 0o750) + } + if err != nil { + // Either we didn't get a cache directory, or we can't use it + baseDir = os.TempDir() + cacheDir = filepath.Join(baseDir, "kubebuilder-envtest") + err = os.MkdirAll(cacheDir, 0o750) + } + if err != nil { + panic(err) + } +} + +type portCache struct{} + +func (c *portCache) add(port int) (bool, error) { + // Remove outdated ports. 
+	if err := fs.WalkDir(os.DirFS(cacheDir), ".", func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+		if d.IsDir() || !d.Type().IsRegular() || !strings.HasPrefix(path, portFilePrefix) {
+			return nil
+		}
+		info, err := d.Info()
+		if err != nil {
+			// No-op if file no longer exists; may have been deleted by another
+			// process/thread trying to allocate ports.
+			if errors.Is(err, fs.ErrNotExist) {
+				return nil
+			}
+			return err
+		}
+		if time.Since(info.ModTime()) > portReserveTime {
+			if err := os.Remove(filepath.Join(cacheDir, path)); err != nil {
+				// No-op if file no longer exists; may have been deleted by another
+				// process/thread trying to allocate ports.
+				if os.IsNotExist(err) {
+					return nil
+				}
+				return err
+			}
+		}
+		return nil
+	}); err != nil {
+		return false, err
+	}
+	// Try allocating new port, by acquiring a file.
+	path := fmt.Sprintf("%s/%s%d", cacheDir, portFilePrefix, port)
+	if err := flock.Acquire(path); errors.Is(err, flock.ErrAlreadyLocked) {
+		return false, nil
+	} else if err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+var cache = &portCache{}
+
+func suggest(listenHost string) (*net.TCPListener, int, string, error) {
+	if listenHost == "" {
+		listenHost = "localhost"
+	}
+	addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(listenHost, "0"))
+	if err != nil {
+		return nil, -1, "", err
+	}
+	l, err := net.ListenTCP("tcp", addr)
+	if err != nil {
+		return nil, -1, "", err
+	}
+	return l, l.Addr().(*net.TCPAddr).Port,
+		addr.IP.String(),
+		nil
+}
+
+// Suggest suggests an address a process can listen on. It returns
+// a tuple consisting of a free port and the hostname resolved to its IP.
+// It makes sure that a newly allocated port does not conflict with ports
+// allocated within portReserveTime (2 minutes).
+func Suggest(listenHost string) (int, string, error) { + for i := 0; i < portConflictRetry; i++ { + listener, port, resolvedHost, err := suggest(listenHost) + if err != nil { + return -1, "", err + } + defer listener.Close() + if ok, err := cache.add(port); ok { + return port, resolvedHost, nil + } else if err != nil { + return -1, "", err + } + } + return -1, "", fmt.Errorf("no free ports found after %d retries", portConflictRetry) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go new file mode 100644 index 00000000000..b4188237e69 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go @@ -0,0 +1,224 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs + +// NB(directxman12): nothing has verified that this has good settings. In fact, +// the setting generated here are probably terrible, but they're fine for integration +// tests. These ABSOLUTELY SHOULD NOT ever be exposed in the public API. They're +// ONLY for use with envtest's ability to configure webhook testing. +// If I didn't otherwise not want to add a dependency on cfssl, I'd just use that. 
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	crand "crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"math/big"
+	"net"
+	"time"
+
+	certutil "k8s.io/client-go/util/cert"
+)
+
+var (
+	ellipticCurve = elliptic.P256()
+	bigOne        = big.NewInt(1)
+)
+
+// CertPair is a private key and certificate for use for client auth, as a CA, or serving.
+type CertPair struct {
+	Key  crypto.Signer
+	Cert *x509.Certificate
+}
+
+// CertBytes returns the PEM-encoded version of the certificate for this pair.
+func (k CertPair) CertBytes() []byte {
+	return pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: k.Cert.Raw,
+	})
+}
+
+// AsBytes encodes keypair in the appropriate formats for on-disk storage (PEM and
+// PKCS8, respectively).
+func (k CertPair) AsBytes() (cert []byte, key []byte, err error) {
+	cert = k.CertBytes()
+
+	rawKeyData, err := x509.MarshalPKCS8PrivateKey(k.Key)
+	if err != nil {
+		return nil, nil, fmt.Errorf("unable to encode private key: %w", err)
+	}
+
+	key = pem.EncodeToMemory(&pem.Block{
+		Type:  "PRIVATE KEY",
+		Bytes: rawKeyData,
+	})
+
+	return cert, key, nil
+}
+
+// TinyCA supports signing serving certs and client-certs,
+// and can be used as an auth mechanism with envtest.
+type TinyCA struct {
+	CA      CertPair
+	orgName string
+
+	nextSerial *big.Int
+}
+
+// newPrivateKey generates a new ECDSA private key on the P-256 curve (see
+// ellipticCurve).
+func newPrivateKey() (crypto.Signer, error) {
+	return ecdsa.GenerateKey(ellipticCurve, crand.Reader)
+}
+
+// NewTinyCA creates a new tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY.
+// Don't use this for anything else!
+func NewTinyCA() (*TinyCA, error) { + caPrivateKey, err := newPrivateKey() + if err != nil { + return nil, fmt.Errorf("unable to generate private key for CA: %w", err) + } + caCfg := certutil.Config{CommonName: "envtest-environment", Organization: []string{"envtest"}} + caCert, err := certutil.NewSelfSignedCACert(caCfg, caPrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to generate certificate for CA: %w", err) + } + + return &TinyCA{ + CA: CertPair{Key: caPrivateKey, Cert: caCert}, + orgName: "envtest", + nextSerial: big.NewInt(1), + }, nil +} + +func (c *TinyCA) makeCert(cfg certutil.Config) (CertPair, error) { + now := time.Now() + + key, err := newPrivateKey() + if err != nil { + return CertPair{}, fmt.Errorf("unable to create private key: %w", err) + } + + serial := new(big.Int).Set(c.nextSerial) + c.nextSerial.Add(c.nextSerial, bigOne) + + template := x509.Certificate{ + Subject: pkix.Name{CommonName: cfg.CommonName, Organization: cfg.Organization}, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + SerialNumber: serial, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: cfg.Usages, + + // technically not necessary for testing, but let's set anyway just in case. + NotBefore: now.UTC(), + // 1 week -- the default for cfssl, and just long enough for a + // long-term test, but not too long that anyone would try to use this + // seriously. 
+ NotAfter: now.Add(168 * time.Hour).UTC(), + } + + certRaw, err := x509.CreateCertificate(crand.Reader, &template, c.CA.Cert, key.Public(), c.CA.Key) + if err != nil { + return CertPair{}, fmt.Errorf("unable to create certificate: %w", err) + } + + cert, err := x509.ParseCertificate(certRaw) + if err != nil { + return CertPair{}, fmt.Errorf("generated invalid certificate, could not parse: %w", err) + } + + return CertPair{ + Key: key, + Cert: cert, + }, nil +} + +// NewServingCert returns a new CertPair for a serving HTTPS on localhost (or other specified names). +func (c *TinyCA) NewServingCert(names ...string) (CertPair, error) { + if len(names) == 0 { + names = []string{"localhost"} + } + dnsNames, ips, err := resolveNames(names) + if err != nil { + return CertPair{}, err + } + + return c.makeCert(certutil.Config{ + CommonName: "localhost", + Organization: []string{c.orgName}, + AltNames: certutil.AltNames{ + DNSNames: dnsNames, + IPs: ips, + }, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) +} + +// ClientInfo describes some Kubernetes user for the purposes of creating +// client certificates. +type ClientInfo struct { + // Name is the user name (embedded as the cert's CommonName) + Name string + // Groups are the groups to which this user belongs (embedded as the cert's + // Organization) + Groups []string +} + +// NewClientCert produces a new CertPair suitable for use with Kubernetes +// client cert auth with an API server validating based on this CA. 
+func (c *TinyCA) NewClientCert(user ClientInfo) (CertPair, error) { + return c.makeCert(certutil.Config{ + CommonName: user.Name, + Organization: user.Groups, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }) +} + +func resolveNames(names []string) ([]string, []net.IP, error) { + dnsNames := []string{} + ips := []net.IP{} + for _, name := range names { + if name == "" { + continue + } + ip := net.ParseIP(name) + if ip == nil { + dnsNames = append(dnsNames, name) + // Also resolve to IPs. + nameIPs, err := net.LookupHost(name) + if err != nil { + return nil, nil, err + } + for _, nameIP := range nameIPs { + ip = net.ParseIP(nameIP) + if ip != nil { + ips = append(ips, ip) + } + } + } else { + ips = append(ips, ip) + } + } + return dnsNames, ips, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go new file mode 100644 index 00000000000..c9a1a232ea9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go @@ -0,0 +1,468 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controlplane + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strconv" + "time" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +const ( + // saKeyFile is the name of the service account signing private key file. + saKeyFile = "sa-signer.key" + // saKeyFile is the name of the service account signing public key (cert) file. + saCertFile = "sa-signer.crt" +) + +// SecureServing provides/configures how the API server serves on the secure port. +type SecureServing struct { + // ListenAddr contains the host & port to serve on. + // + // Configurable. If unset, it will be defaulted. + process.ListenAddr + // CA contains the CA that signed the API server's serving certificates. + // + // Read-only. + CA []byte + // Authn can be used to provision users, and override what type of + // authentication is used to provision users. + // + // Configurable. If unset, it will be defaulted. + Authn +} + +// APIServer knows how to run a kubernetes apiserver. +type APIServer struct { + // URL is the address the ApiServer should listen on for client + // connections. + // + // If set, this will configure the *insecure* serving details. + // If unset, it will contain the insecure port if insecure serving is enabled, + // and otherwise will contain the secure port. + // + // If this is not specified, we default to a random free port on localhost. + // + // Deprecated: use InsecureServing (for the insecure URL) or SecureServing, ideally. + URL *url.URL + + // SecurePort is the additional secure port that the APIServer should listen on. + // + // If set, this will override SecureServing.Port. + // + // Deprecated: use SecureServing. + SecurePort int + + // SecureServing indicates how the API server will serve on the secure port. + // + // Some parts are configurable. Will be defaulted if unset. 
+ SecureServing + + // InsecureServing indicates how the API server will serve on the insecure port. + // + // If unset, the insecure port will be disabled. Set to an empty struct to get + // default values. + // + // Deprecated: does not work with Kubernetes versions 1.20 and above. Use secure + // serving instead. + InsecureServing *process.ListenAddr + + // Path is the path to the apiserver binary. + // + // If this is left as the empty string, we will attempt to locate a binary, + // by checking for the TEST_ASSET_KUBE_APISERVER environment variable, and + // the default test assets directory. See the "Binaries" section above (in + // doc.go) for details. + Path string + + // Args is a list of arguments which will passed to the APIServer binary. + // Before they are passed on, they will be evaluated as go-template strings. + // This means you can use fields which are defined and exported on this + // APIServer struct (e.g. "--cert-dir={{ .Dir }}"). + // Those templates will be evaluated after the defaulting of the APIServer's + // fields has already happened and just before the binary actually gets + // started. Thus you have access to calculated fields like `URL` and others. + // + // If not specified, the minimal set of arguments to run the APIServer will + // be used. + // + // They will be loaded into the same argument set as Configure. Each flag + // will be Append-ed to the configured arguments just before launch. + // + // Deprecated: use Configure instead. + Args []string + + // CertDir is a path to a directory containing whatever certificates the + // APIServer will need. + // + // If left unspecified, then the Start() method will create a fresh temporary + // directory, and the Stop() method will clean it up. + CertDir string + + // EtcdURL is the URL of the Etcd the APIServer should use. + // + // If this is not specified, the Start() method will return an error. 
+ EtcdURL *url.URL + + // StartTimeout, StopTimeout specify the time the APIServer is allowed to + // take when starting and stoppping before an error is emitted. + // + // If not specified, these default to 20 seconds. + StartTimeout time.Duration + StopTimeout time.Duration + + // Out, Err specify where APIServer should write its StdOut, StdErr to. + // + // If not specified, the output will be discarded. + Out io.Writer + Err io.Writer + + processState *process.State + + // args contains the structured arguments to use for running the API server + // Lazily initialized by .Configure(), Defaulted eventually with .defaultArgs() + args *process.Arguments +} + +// Configure returns Arguments that may be used to customize the +// flags used to launch the API server. A set of defaults will +// be applied underneath. +func (s *APIServer) Configure() *process.Arguments { + if s.args == nil { + s.args = process.EmptyArguments() + } + return s.args +} + +// Start starts the apiserver, waits for it to come up, and returns an error, +// if occurred. +func (s *APIServer) Start() error { + if err := s.prepare(); err != nil { + return err + } + return s.processState.Start(s.Out, s.Err) +} + +func (s *APIServer) prepare() error { + if err := s.setProcessState(); err != nil { + return err + } + return s.Authn.Start() +} + +// configurePorts configures the serving ports for this API server. +// +// Most of this method currently deals with making the deprecated fields +// take precedence over the new fields. +func (s *APIServer) configurePorts() error { + // prefer the old fields to the new fields if a user set one, + // otherwise, default the new fields and populate the old ones. 
+ + // Insecure: URL, InsecureServing + if s.URL != nil { + s.InsecureServing = &process.ListenAddr{ + Address: s.URL.Hostname(), + Port: s.URL.Port(), + } + } else if insec := s.InsecureServing; insec != nil { + if insec.Port == "" || insec.Address == "" { + port, host, err := addr.Suggest("") + if err != nil { + return fmt.Errorf("unable to provision unused insecure port: %w", err) + } + s.InsecureServing.Port = strconv.Itoa(port) + s.InsecureServing.Address = host + } + s.URL = s.InsecureServing.URL("http", "") + } + + // Secure: SecurePort, SecureServing + if s.SecurePort != 0 { + s.SecureServing.Port = strconv.Itoa(s.SecurePort) + // if we don't have an address, try the insecure address, and otherwise + // default to loopback. + if s.SecureServing.Address == "" { + if s.InsecureServing != nil { + s.SecureServing.Address = s.InsecureServing.Address + } else { + s.SecureServing.Address = "127.0.0.1" + } + } + } else if s.SecureServing.Port == "" || s.SecureServing.Address == "" { + port, host, err := addr.Suggest("") + if err != nil { + return fmt.Errorf("unable to provision unused secure port: %w", err) + } + s.SecureServing.Port = strconv.Itoa(port) + s.SecureServing.Address = host + s.SecurePort = port + } + + return nil +} + +func (s *APIServer) setProcessState() error { + if s.EtcdURL == nil { + return fmt.Errorf("expected EtcdURL to be configured") + } + + var err error + + // unconditionally re-set this so we can successfully restart + // TODO(directxman12): we supported this in the past, but do we actually + // want to support re-using an API server object to restart? The loss + // of provisioned users is surprising to say the least. 
+ s.processState = &process.State{ + Dir: s.CertDir, + Path: s.Path, + StartTimeout: s.StartTimeout, + StopTimeout: s.StopTimeout, + } + if err := s.processState.Init("kube-apiserver"); err != nil { + return err + } + + if err := s.configurePorts(); err != nil { + return err + } + + // the secure port will always be on, so use that + s.processState.HealthCheck.URL = *s.SecureServing.URL("https", "/healthz") + + s.CertDir = s.processState.Dir + s.Path = s.processState.Path + s.StartTimeout = s.processState.StartTimeout + s.StopTimeout = s.processState.StopTimeout + + if err := s.populateAPIServerCerts(); err != nil { + return err + } + + if s.SecureServing.Authn == nil { + authn, err := NewCertAuthn() + if err != nil { + return err + } + s.SecureServing.Authn = authn + } + + if err := s.Authn.Configure(s.CertDir, s.Configure()); err != nil { + return err + } + + // NB(directxman12): insecure port is a mess: + // - 1.19 and below have the `--insecure-port` flag, and require it to be set to zero to + // disable it, otherwise the default will be used and we'll conflict. + // - 1.20 requires the flag to be unset or set to zero, and yells at you if you configure it + // - 1.24 won't have the flag at all... + // + // In an effort to automatically do the right thing during this mess, we do feature discovery + // on the flags, and hope that we've "parsed" them properly. + // + // TODO(directxman12): once we support 1.20 as the min version (might be when 1.24 comes out, + // might be around 1.25 or 1.26), remove this logic and the corresponding line in API server's + // default args. 
+ if err := s.discoverFlags(); err != nil { + return err + } + + s.processState.Args, s.Args, err = process.TemplateAndArguments(s.Args, s.Configure(), process.TemplateDefaults{ //nolint:staticcheck + Data: s, + Defaults: s.defaultArgs(), + MinimalDefaults: map[string][]string{ + // as per kubernetes-sigs/controller-runtime#641, we need this (we + // probably need other stuff too, but this is the only thing that was + // previously considered a "minimal default") + "service-cluster-ip-range": {"10.0.0.0/24"}, + + // we need *some* authorization mode for health checks on the secure port, + // so default to RBAC unless the user set something else (in which case + // this'll be ignored due to SliceToArguments using AppendNoDefaults). + "authorization-mode": {"RBAC"}, + }, + }) + if err != nil { + return err + } + + return nil +} + +// discoverFlags checks for certain flags that *must* be set in certain +// versions, and *must not* be set in others. +func (s *APIServer) discoverFlags() error { + // Present: <1.24, Absent: >= 1.24 + present, err := s.processState.CheckFlag("insecure-port") + if err != nil { + return err + } + + if !present { + s.Configure().Disable("insecure-port") + } + + return nil +} + +func (s *APIServer) defaultArgs() map[string][]string { + args := map[string][]string{ + "service-cluster-ip-range": {"10.0.0.0/24"}, + "allow-privileged": {"true"}, + // we're keeping this disabled because if enabled, default SA is + // missing which would force all tests to create one in normal + // apiserver operation this SA is created by controller, but that is + // not run in integration environment + "disable-admission-plugins": {"ServiceAccount"}, + "cert-dir": {s.CertDir}, + "authorization-mode": {"RBAC"}, + "secure-port": {s.SecureServing.Port}, + // NB(directxman12): previously we didn't set the bind address for the secure + // port. 
It *shouldn't* make a difference unless people are doing something really + // funky, but if you start to get bug reports look here ;-) + "bind-address": {s.SecureServing.Address}, + + // required on 1.20+, fine to leave on for <1.20 + "service-account-issuer": {s.SecureServing.URL("https", "/").String()}, + "service-account-key-file": {filepath.Join(s.CertDir, saCertFile)}, + "service-account-signing-key-file": {filepath.Join(s.CertDir, saKeyFile)}, + } + if s.EtcdURL != nil { + args["etcd-servers"] = []string{s.EtcdURL.String()} + } + if s.URL != nil { + args["insecure-port"] = []string{s.URL.Port()} + args["insecure-bind-address"] = []string{s.URL.Hostname()} + } else { + // TODO(directxman12): remove this once 1.21 is the lowest version we support + // (this might be a while, but this line'll break as of 1.24, so see the comment + // in Start + args["insecure-port"] = []string{"0"} + } + return args +} + +func (s *APIServer) populateAPIServerCerts() error { + _, statErr := os.Stat(filepath.Join(s.CertDir, "apiserver.crt")) + if !os.IsNotExist(statErr) { + return statErr + } + + ca, err := certs.NewTinyCA() + if err != nil { + return err + } + + servingCerts, err := ca.NewServingCert() + if err != nil { + return err + } + + certData, keyData, err := servingCerts.AsBytes() + if err != nil { + return err + } + + if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.crt"), certData, 0640); err != nil { //nolint:gosec + return err + } + if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.key"), keyData, 0640); err != nil { //nolint:gosec + return err + } + + s.SecureServing.CA = ca.CA.CertBytes() + + // service account signing files too + saCA, err := certs.NewTinyCA() + if err != nil { + return err + } + + saCert, saKey, err := saCA.CA.AsBytes() + if err != nil { + return err + } + + if err := os.WriteFile(filepath.Join(s.CertDir, saCertFile), saCert, 0640); err != nil { //nolint:gosec + return err + } + return os.WriteFile(filepath.Join(s.CertDir, 
saKeyFile), saKey, 0640) //nolint:gosec +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the CertDir if necessary. +func (s *APIServer) Stop() error { + if s.processState != nil { + if s.processState.DirNeedsCleaning { + s.CertDir = "" // reset the directory if it was randomly allocated, so that we can safely restart + } + if err := s.processState.Stop(); err != nil { + return err + } + } + return s.Authn.Stop() +} + +// APIServerDefaultArgs exposes the default args for the APIServer so that you +// can use those to append your own additional arguments. +// +// Note that these arguments don't handle newer API servers well to due the more +// complex feature detection neeeded. It's recommended that you switch to .Configure +// as you upgrade API server versions. +// +// Deprecated: use APIServer.Configure(). +var APIServerDefaultArgs = []string{ + "--advertise-address=127.0.0.1", + "--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}", + "--cert-dir={{ .CertDir }}", + "--insecure-port={{ if .URL }}{{ .URL.Port }}{{else}}0{{ end }}", + "{{ if .URL }}--insecure-bind-address={{ .URL.Hostname }}{{ end }}", + "--secure-port={{ if .SecurePort }}{{ .SecurePort }}{{ end }}", + // we're keeping this disabled because if enabled, default SA is missing which would force all tests to create one + // in normal apiserver operation this SA is created by controller, but that is not run in integration environment + "--disable-admission-plugins=ServiceAccount", + "--service-cluster-ip-range=10.0.0.0/24", + "--allow-privileged=true", + // NB(directxman12): we also enable RBAC if nothing else was enabled +} + +// PrepareAPIServer is an internal-only (NEVER SHOULD BE EXPOSED) +// function that sets up the API server just before starting it, +// without actually starting it. This saves time on tests. 
+// +// NB(directxman12): do not expose this outside of internal -- it's unsafe to +// use, because things like port allocation could race even more than they +// currently do if you later call start! +func PrepareAPIServer(s *APIServer) error { + return s.prepare() +} + +// APIServerArguments is an internal-only (NEVER SHOULD BE EXPOSED) +// function that sets up the API server just before starting it, +// without actually starting it. It's public to make testing easier. +// +// NB(directxman12): do not expose this outside of internal. +func APIServerArguments(s *APIServer) []string { + return s.processState.Args +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go new file mode 100644 index 00000000000..16c86a712c1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go @@ -0,0 +1,142 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "os" + "path/filepath" + + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +// User represents a Kubernetes user. +type User struct { + // Name is the user's Name. + Name string + // Groups are the groups to which the user belongs. 
+ Groups []string +} + +// Authn knows how to configure an API server for a particular type of authentication, +// and provision users under that authentication scheme. +// +// The methods must be called in the following order (as presented below in the interface +// for a mnemonic): +// +// 1. Configure +// 2. Start +// 3. AddUsers (0+ calls) +// 4. Stop. +type Authn interface { + // Configure provides the working directory to this authenticator, + // and configures the given API server arguments to make use of this authenticator. + // + // Should be called first. + Configure(workDir string, args *process.Arguments) error + // Start runs this authenticator. Will be called just before API server start. + // + // Must be called after Configure. + Start() error + // AddUser provisions a user, returning a copy of the given base rest.Config + // configured to authenticate as that users. + // + // May only be called while the authenticator is "running". + AddUser(user User, baseCfg *rest.Config) (*rest.Config, error) + // Stop shuts down this authenticator. + Stop() error +} + +// CertAuthn is an authenticator (Authn) that makes use of client certificate authn. +type CertAuthn struct { + // ca is the CA used to sign the client certs + ca *certs.TinyCA + // certDir is the directory used to write the CA crt file + // so that the API server can read it. + certDir string +} + +// NewCertAuthn creates a new client-cert-based Authn with a new CA. +func NewCertAuthn() (*CertAuthn, error) { + ca, err := certs.NewTinyCA() + if err != nil { + return nil, fmt.Errorf("unable to provision client certificate auth CA: %w", err) + } + return &CertAuthn{ + ca: ca, + }, nil +} + +// AddUser provisions a new user that's authenticated via certificates, with +// the given uesrname and groups embedded in the certificate as expected by the +// API server. 
+func (c *CertAuthn) AddUser(user User, baseCfg *rest.Config) (*rest.Config, error) { + certs, err := c.ca.NewClientCert(certs.ClientInfo{ + Name: user.Name, + Groups: user.Groups, + }) + if err != nil { + return nil, fmt.Errorf("unable to create client certificates for %s: %w", user.Name, err) + } + + crt, key, err := certs.AsBytes() + if err != nil { + return nil, fmt.Errorf("unable to serialize client certificates for %s: %w", user.Name, err) + } + + cfg := rest.CopyConfig(baseCfg) + cfg.CertData = crt + cfg.KeyData = key + + return cfg, nil +} + +// caCrtPath returns the path to the on-disk client-cert CA crt file. +func (c *CertAuthn) caCrtPath() string { + return filepath.Join(c.certDir, "client-cert-auth-ca.crt") +} + +// Configure provides the working directory to this authenticator, +// and configures the given API server arguments to make use of this authenticator. +func (c *CertAuthn) Configure(workDir string, args *process.Arguments) error { + c.certDir = workDir + args.Set("client-ca-file", c.caCrtPath()) + return nil +} + +// Start runs this authenticator. Will be called just before API server start. +// +// Must be called after Configure. +func (c *CertAuthn) Start() error { + if len(c.certDir) == 0 { + return fmt.Errorf("start called before configure") + } + caCrt := c.ca.CA.CertBytes() + if err := os.WriteFile(c.caCrtPath(), caCrt, 0640); err != nil { //nolint:gosec + return fmt.Errorf("unable to save the client certificate CA to %s: %w", c.caCrtPath(), err) + } + + return nil +} + +// Stop shuts down this authenticator. 
+func (c *CertAuthn) Stop() error { + // no-op -- our workdir is cleaned up for us automatically + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go new file mode 100644 index 00000000000..c30d2132952 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go @@ -0,0 +1,202 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "io" + "net" + "net/url" + "strconv" + "time" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +// Etcd knows how to run an etcd server. +type Etcd struct { + // URL is the address the Etcd should listen on for client connections. + // + // If this is not specified, we default to a random free port on localhost. + URL *url.URL + + // Path is the path to the etcd binary. + // + // If this is left as the empty string, we will attempt to locate a binary, + // by checking for the TEST_ASSET_ETCD environment variable, and the default + // test assets directory. See the "Binaries" section above (in doc.go) for + // details. + Path string + + // Args is a list of arguments which will passed to the Etcd binary. Before + // they are passed on, the`y will be evaluated as go-template strings. 
This + // means you can use fields which are defined and exported on this Etcd + // struct (e.g. "--data-dir={{ .Dir }}"). + // Those templates will be evaluated after the defaulting of the Etcd's + // fields has already happened and just before the binary actually gets + // started. Thus you have access to calculated fields like `URL` and others. + // + // If not specified, the minimal set of arguments to run the Etcd will be + // used. + // + // They will be loaded into the same argument set as Configure. Each flag + // will be Append-ed to the configured arguments just before launch. + // + // Deprecated: use Configure instead. + Args []string + + // DataDir is a path to a directory in which etcd can store its state. + // + // If left unspecified, then the Start() method will create a fresh temporary + // directory, and the Stop() method will clean it up. + DataDir string + + // StartTimeout, StopTimeout specify the time the Etcd is allowed to + // take when starting and stopping before an error is emitted. + // + // If not specified, these default to 20 seconds. + StartTimeout time.Duration + StopTimeout time.Duration + + // Out, Err specify where Etcd should write its StdOut, StdErr to. + // + // If not specified, the output will be discarded. + Out io.Writer + Err io.Writer + + // processState contains the actual details about this running process + processState *process.State + + // args contains the structured arguments to use for running etcd. + // Lazily initialized by .Configure(), Defaulted eventually with .defaultArgs() + args *process.Arguments + + // listenPeerURL is the address the Etcd should listen on for peer connections. + // It's automatically generated and a random port is picked during execution. + listenPeerURL *url.URL +} + +// Start starts the etcd, waits for it to come up, and returns an error, if one +// occurred. 
+func (e *Etcd) Start() error { + if err := e.setProcessState(); err != nil { + return err + } + return e.processState.Start(e.Out, e.Err) +} + +func (e *Etcd) setProcessState() error { + e.processState = &process.State{ + Dir: e.DataDir, + Path: e.Path, + StartTimeout: e.StartTimeout, + StopTimeout: e.StopTimeout, + } + + // unconditionally re-set this so we can successfully restart + // TODO(directxman12): we supported this in the past, but do we actually + // want to support re-using an API server object to restart? The loss + // of provisioned users is surprising to say the least. + if err := e.processState.Init("etcd"); err != nil { + return err + } + + // Set the listen url. + if e.URL == nil { + port, host, err := addr.Suggest("") + if err != nil { + return err + } + e.URL = &url.URL{ + Scheme: "http", + Host: net.JoinHostPort(host, strconv.Itoa(port)), + } + } + + // Set the listen peer URL. + { + port, host, err := addr.Suggest("") + if err != nil { + return err + } + e.listenPeerURL = &url.URL{ + Scheme: "http", + Host: net.JoinHostPort(host, strconv.Itoa(port)), + } + } + + // can use /health as of etcd 3.3.0 + e.processState.HealthCheck.URL = *e.URL + e.processState.HealthCheck.Path = "/health" + + e.DataDir = e.processState.Dir + e.Path = e.processState.Path + e.StartTimeout = e.processState.StartTimeout + e.StopTimeout = e.processState.StopTimeout + + var err error + e.processState.Args, e.Args, err = process.TemplateAndArguments(e.Args, e.Configure(), process.TemplateDefaults{ //nolint:staticcheck + Data: e, + Defaults: e.defaultArgs(), + }) + return err +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the DataDir if necessary. 
+func (e *Etcd) Stop() error { + if e.processState.DirNeedsCleaning { + e.DataDir = "" // reset the directory if it was randomly allocated, so that we can safely restart + } + return e.processState.Stop() +} + +func (e *Etcd) defaultArgs() map[string][]string { + args := map[string][]string{ + "listen-peer-urls": {e.listenPeerURL.String()}, + "data-dir": {e.DataDir}, + } + if e.URL != nil { + args["advertise-client-urls"] = []string{e.URL.String()} + args["listen-client-urls"] = []string{e.URL.String()} + } + + // Add unsafe no fsync, available from etcd 3.5 + if ok, _ := e.processState.CheckFlag("unsafe-no-fsync"); ok { + args["unsafe-no-fsync"] = []string{"true"} + } + return args +} + +// Configure returns Arguments that may be used to customize the +// flags used to launch etcd. A set of defaults will +// be applied underneath. +func (e *Etcd) Configure() *process.Arguments { + if e.args == nil { + e.args = process.EmptyArguments() + } + return e.args +} + +// EtcdDefaultArgs exposes the default args for Etcd so that you +// can use those to append your own additional arguments. +var EtcdDefaultArgs = []string{ + "--listen-peer-urls=http://localhost:0", + "--advertise-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", + "--listen-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", + "--data-dir={{ .DataDir }}", +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go new file mode 100644 index 00000000000..a41bb77c4dd --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go @@ -0,0 +1,120 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "bytes" + "fmt" + "io" + "net/url" + "os/exec" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + kcapi "k8s.io/client-go/tools/clientcmd/api" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +const ( + envtestName = "envtest" +) + +// KubeConfigFromREST reverse-engineers a kubeconfig file from a rest.Config. +// The options are tailored towards the rest.Configs we generate, so they're +// not broadly applicable. +// +// This is not intended to be exposed beyond internal for the above reasons. +func KubeConfigFromREST(cfg *rest.Config) ([]byte, error) { + kubeConfig := kcapi.NewConfig() + protocol := "https" + if !rest.IsConfigTransportTLS(*cfg) { + protocol = "http" + } + + // cfg.Host is a URL, so we need to parse it so we can properly append the API path + baseURL, err := url.Parse(cfg.Host) + if err != nil { + return nil, fmt.Errorf("unable to interpret config's host value as a URL: %w", err) + } + + kubeConfig.Clusters[envtestName] = &kcapi.Cluster{ + // TODO(directxman12): if client-go ever decides to expose defaultServerUrlFor(config), + // we can just use that. Note that this is not the same as the public DefaultServerURL, + // which requires us to pass a bunch of stuff in manually. 
+ Server: (&url.URL{Scheme: protocol, Host: baseURL.Host, Path: cfg.APIPath}).String(), + CertificateAuthorityData: cfg.CAData, + } + kubeConfig.AuthInfos[envtestName] = &kcapi.AuthInfo{ + // try to cover all auth strategies that aren't plugins + ClientCertificateData: cfg.CertData, + ClientKeyData: cfg.KeyData, + Token: cfg.BearerToken, + Username: cfg.Username, + Password: cfg.Password, + } + kcCtx := kcapi.NewContext() + kcCtx.Cluster = envtestName + kcCtx.AuthInfo = envtestName + kubeConfig.Contexts[envtestName] = kcCtx + kubeConfig.CurrentContext = envtestName + + contents, err := clientcmd.Write(*kubeConfig) + if err != nil { + return nil, fmt.Errorf("unable to serialize kubeconfig file: %w", err) + } + return contents, nil +} + +// KubeCtl is a wrapper around the kubectl binary. +type KubeCtl struct { + // Path where the kubectl binary can be found. + // + // If this is left empty, we will attempt to locate a binary, by checking for + // the TEST_ASSET_KUBECTL environment variable, and the default test assets + // directory. See the "Binaries" section above (in doc.go) for details. + Path string + + // Opts can be used to configure additional flags which will be used each + // time the wrapped binary is called. + // + // For example, you might want to use this to set the URL of the APIServer to + // connect to. + Opts []string +} + +// Run executes the wrapped binary with some preconfigured options and the +// arguments given to this method. It returns Readers for the stdout and +// stderr. +func (k *KubeCtl) Run(args ...string) (stdout, stderr io.Reader, err error) { + if k.Path == "" { + k.Path = process.BinPathFinder("kubectl", "") + } + + stdoutBuffer := &bytes.Buffer{} + stderrBuffer := &bytes.Buffer{} + allArgs := append(k.Opts, args...) + + cmd := exec.Command(k.Path, allArgs...) 
+ cmd.Stdout = stdoutBuffer + cmd.Stderr = stderrBuffer + cmd.SysProcAttr = process.GetSysProcAttr() + + err = cmd.Run() + + return stdoutBuffer, stderrBuffer, err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go new file mode 100644 index 00000000000..456183a7a32 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go @@ -0,0 +1,259 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "net/url" + "os" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +// NewTinyCA creates a new a tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY. +// Don't use this for anything else! +var NewTinyCA = certs.NewTinyCA + +// ControlPlane is a struct that knows how to start your test control plane. +// +// Right now, that means Etcd and your APIServer. This is likely to increase in +// future. +type ControlPlane struct { + APIServer *APIServer + Etcd *Etcd + + // Kubectl will override the default asset search path for kubectl + KubectlPath string + + // for the deprecated methods (Kubectl, etc) + defaultUserCfg *rest.Config + defaultUserKubectl *KubeCtl +} + +// Start will start your control plane processes. 
To stop them, call Stop(). +func (f *ControlPlane) Start() (retErr error) { + if f.Etcd == nil { + f.Etcd = &Etcd{} + } + if err := f.Etcd.Start(); err != nil { + return err + } + defer func() { + if retErr != nil { + _ = f.Etcd.Stop() + } + }() + + if f.APIServer == nil { + f.APIServer = &APIServer{} + } + f.APIServer.EtcdURL = f.Etcd.URL + if err := f.APIServer.Start(); err != nil { + return err + } + defer func() { + if retErr != nil { + _ = f.APIServer.Stop() + } + }() + + // provision the default user -- can be removed when the related + // methods are removed. The default user has admin permissions to + // mimic legacy no-authz setups. + user, err := f.AddUser(User{Name: "default", Groups: []string{"system:masters"}}, &rest.Config{}) + if err != nil { + return fmt.Errorf("unable to provision the default (legacy) user: %w", err) + } + kubectl, err := user.Kubectl() + if err != nil { + return fmt.Errorf("unable to provision the default (legacy) kubeconfig: %w", err) + } + f.defaultUserCfg = user.Config() + f.defaultUserKubectl = kubectl + return nil +} + +// Stop will stop your control plane processes, and clean up their data. +func (f *ControlPlane) Stop() error { + var errList []error + + if f.APIServer != nil { + if err := f.APIServer.Stop(); err != nil { + errList = append(errList, err) + } + } + + if f.Etcd != nil { + if err := f.Etcd.Stop(); err != nil { + errList = append(errList, err) + } + } + + return kerrors.NewAggregate(errList) +} + +// APIURL returns the URL you should connect to to talk to your API server. +// +// If insecure serving is configured, this will contain the insecure port. +// Otherwise, it will contain the secure port. +// +// Deprecated: use AddUser instead, or APIServer.{Ins|S}ecureServing.URL if +// you really want just the URL. +func (f *ControlPlane) APIURL() *url.URL { + return f.APIServer.URL +} + +// KubeCtl returns a pre-configured KubeCtl, ready to connect to this +// ControlPlane. 
+// +// Deprecated: use AddUser & AuthenticatedUser.Kubectl instead. +func (f *ControlPlane) KubeCtl() *KubeCtl { + return f.defaultUserKubectl +} + +// RESTClientConfig returns a pre-configured restconfig, ready to connect to +// this ControlPlane. +// +// Deprecated: use AddUser & AuthenticatedUser.Config instead. +func (f *ControlPlane) RESTClientConfig() (*rest.Config, error) { + return f.defaultUserCfg, nil +} + +// AuthenticatedUser contains access information for an provisioned user, +// including REST config, kubeconfig contents, and access to a KubeCtl instance. +// +// It's not "safe" to use the methods on this till after the API server has been +// started (due to certificate initialization and such). The various methods will +// panic if this is done. +type AuthenticatedUser struct { + // cfg is the rest.Config for connecting to the API server. It's lazily initialized. + cfg *rest.Config + // cfgIsComplete indicates the cfg has had late-initialized fields (e.g. + // API server CA data) initialized. + cfgIsComplete bool + + // apiServer is a handle to the APIServer that's used when finalizing cfg + // and producing the kubectl instance. + plane *ControlPlane + + // kubectl is our existing, provisioned kubectl. We don't provision one + // till someone actually asks for it. + kubectl *KubeCtl +} + +// Config returns the REST config that can be used to connect to the API server +// as this user. +// +// Will panic if used before the API server is started. +func (u *AuthenticatedUser) Config() *rest.Config { + // NB(directxman12): we choose to panic here for ergonomics sake, and because there's + // not really much you can do to "handle" this error. This machinery is intended to be + // used in tests anyway, so panicing is not a particularly big deal. 
+ if u.cfgIsComplete { + return u.cfg + } + if len(u.plane.APIServer.SecureServing.CA) == 0 { + panic("the API server has not yet been started, please do that before accessing connection details") + } + + u.cfg.CAData = u.plane.APIServer.SecureServing.CA + u.cfg.Host = u.plane.APIServer.SecureServing.URL("https", "/").String() + u.cfgIsComplete = true + return u.cfg +} + +// KubeConfig returns a KubeConfig that's roughly equivalent to this user's REST config. +// +// Will panic if used before the API server is started. +func (u AuthenticatedUser) KubeConfig() ([]byte, error) { + // NB(directxman12): we don't return the actual API object to avoid yet another + // piece of kubernetes API in our public API, and also because generally the thing + // you want to do with this is just write it out to a file for external debugging + // purposes, etc. + return KubeConfigFromREST(u.Config()) +} + +// Kubectl returns a KubeCtl instance for talking to the API server as this user. It uses +// a kubeconfig equivalent to that returned by .KubeConfig. +// +// Will panic if used before the API server is started. 
+func (u *AuthenticatedUser) Kubectl() (*KubeCtl, error) { + if u.kubectl != nil { + return u.kubectl, nil + } + if len(u.plane.APIServer.CertDir) == 0 { + panic("the API server has not yet been started, please do that before accessing connection details") + } + + // cleaning this up is handled when our tmpDir is deleted + out, err := os.CreateTemp(u.plane.APIServer.CertDir, "*.kubecfg") + if err != nil { + return nil, fmt.Errorf("unable to create file for kubeconfig: %w", err) + } + defer out.Close() + contents, err := KubeConfigFromREST(u.Config()) + if err != nil { + return nil, err + } + if _, err := out.Write(contents); err != nil { + return nil, fmt.Errorf("unable to write kubeconfig to disk at %s: %w", out.Name(), err) + } + k := &KubeCtl{ + Path: u.plane.KubectlPath, + } + k.Opts = append(k.Opts, fmt.Sprintf("--kubeconfig=%s", out.Name())) + u.kubectl = k + return k, nil +} + +// AddUser provisions a new user in the cluster. It uses the APIServer's authentication +// strategy -- see APIServer.SecureServing.Authn. +// +// Unlike AddUser, it's safe to pass a nil rest.Config here if you have no +// particular opinions about the config. +// +// The default authentication strategy is not guaranteed to any specific strategy, but it is +// guaranteed to be callable both before and after Start has been called (but, as noted in the +// AuthenticatedUser docs, the given user objects are only valid after Start has been called). +func (f *ControlPlane) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) { + if f.GetAPIServer().SecureServing.Authn == nil { + return nil, fmt.Errorf("no API server authentication is configured yet. 
The API server defaults one when Start is called, did you mean to use that?") + } + + if baseConfig == nil { + baseConfig = &rest.Config{} + } + cfg, err := f.GetAPIServer().SecureServing.AddUser(user, baseConfig) + if err != nil { + return nil, err + } + + return &AuthenticatedUser{ + cfg: cfg, + plane: f, + }, nil +} + +// GetAPIServer returns this ControlPlane's APIServer, initializing it if necessary. +func (f *ControlPlane) GetAPIServer() *APIServer { + if f.APIServer == nil { + f.APIServer = &APIServer{} + } + return f.APIServer +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go new file mode 100644 index 00000000000..391eec1facf --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go @@ -0,0 +1,340 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "bytes" + "html/template" + "sort" + "strings" +) + +// RenderTemplates returns an []string to render the templates +// +// Deprecated: will be removed in favor of Arguments. 
+func RenderTemplates(argTemplates []string, data interface{}) (args []string, err error) { + var t *template.Template + + for _, arg := range argTemplates { + t, err = template.New(arg).Parse(arg) + if err != nil { + args = nil + return + } + + buf := &bytes.Buffer{} + err = t.Execute(buf, data) + if err != nil { + args = nil + return + } + args = append(args, buf.String()) + } + + return +} + +// SliceToArguments converts a slice of arguments to structured arguments, +// appending each argument that starts with `--` and contains an `=` to the +// argument set (ignoring defaults), returning the rest. +// +// Deprecated: will be removed when RenderTemplates is removed. +func SliceToArguments(sliceArgs []string, args *Arguments) []string { + var rest []string + for i, arg := range sliceArgs { + if arg == "--" { + rest = append(rest, sliceArgs[i:]...) + return rest + } + // skip non-flag arguments, skip arguments w/o equals because we + // can't tell if the next argument should take a value + if !strings.HasPrefix(arg, "--") || !strings.Contains(arg, "=") { + rest = append(rest, arg) + continue + } + + parts := strings.SplitN(arg[2:], "=", 2) + name := parts[0] + val := parts[1] + + args.AppendNoDefaults(name, val) + } + + return rest +} + +// TemplateDefaults specifies defaults to be used for joining structured arguments with templates. +// +// Deprecated: will be removed when RenderTemplates is removed. +type TemplateDefaults struct { + // Data will be used to render the template. + Data interface{} + // Defaults will be used to default structured arguments if no template is passed. + Defaults map[string][]string + // MinimalDefaults will be used to default structured arguments if a template is passed. + // Use this for flags which *must* be present. + MinimalDefaults map[string][]string // for api server service-cluster-ip-range +} + +// TemplateAndArguments joins structured arguments and non-structured arguments, preserving existing +// behavior. 
Namely: +// +// 1. if templ has len > 0, it will be rendered against data +// 2. the rendered template values that look like `--foo=bar` will be split +// and appended to args, the rest will be kept around +// 3. the given args will be rendered as string form. If a template is given, +// no defaults will be used, otherwise defaults will be used +// 4. a result of [args..., rest...] will be returned +// +// It returns the resulting rendered arguments, plus the arguments that were +// not transferred to `args` during rendering. +// +// Deprecated: will be removed when RenderTemplates is removed. +func TemplateAndArguments(templ []string, args *Arguments, data TemplateDefaults) (allArgs []string, nonFlagishArgs []string, err error) { + if len(templ) == 0 { // 3 & 4 (no template case) + return args.AsStrings(data.Defaults), nil, nil + } + + // 1: render the template + rendered, err := RenderTemplates(templ, data.Data) + if err != nil { + return nil, nil, err + } + + // 2: filter out structured args and add them to args + rest := SliceToArguments(rendered, args) + + // 3 (template case): render structured args, no defaults (matching the + // legacy case where if Args was specified, no defaults were used) + res := args.AsStrings(data.MinimalDefaults) + + // 4: return the rendered structured args + all non-structured args + return append(res, rest...), rest, nil +} + +// EmptyArguments constructs an empty set of flags with no defaults. +func EmptyArguments() *Arguments { + return &Arguments{ + values: make(map[string]Arg), + } +} + +// Arguments are structured, overridable arguments. +// Each Arguments object contains some set of default arguments, which may +// be appended to, or overridden. +// +// When ready, you can serialize them to pass to exec.Command and friends using +// AsStrings. +// +// All flag-setting methods return the *same* instance of Arguments so that you +// can chain calls. 
+type Arguments struct { + // values contains the user-set values for the arguments. + // `values[key] = dontPass` means "don't pass this flag" + // `values[key] = passAsName` means "pass this flag without args like --key` + // `values[key] = []string{a, b, c}` means "--key=a --key=b --key=c` + // any values not explicitly set here will be copied from defaults on final rendering. + values map[string]Arg +} + +// Arg is an argument that has one or more values, +// and optionally falls back to default values. +type Arg interface { + // Append adds new values to this argument, returning + // a new instance contain the new value. The intermediate + // argument should generally be assumed to be consumed. + Append(vals ...string) Arg + // Get returns the full set of values, optionally including + // the passed in defaults. If it returns nil, this will be + // skipped. If it returns a non-nil empty slice, it'll be + // assumed that the argument should be passed as name-only. + Get(defaults []string) []string +} + +type userArg []string + +func (a userArg) Append(vals ...string) Arg { + return userArg(append(a, vals...)) //nolint:unconvert +} +func (a userArg) Get(_ []string) []string { + return []string(a) +} + +type defaultedArg []string + +func (a defaultedArg) Append(vals ...string) Arg { + return defaultedArg(append(a, vals...)) //nolint:unconvert +} +func (a defaultedArg) Get(defaults []string) []string { + res := append([]string(nil), defaults...) + return append(res, a...) +} + +type dontPassArg struct{} + +func (a dontPassArg) Append(vals ...string) Arg { + return userArg(vals) +} +func (dontPassArg) Get(_ []string) []string { + return nil +} + +type passAsNameArg struct{} + +func (a passAsNameArg) Append(_ ...string) Arg { + return passAsNameArg{} +} +func (passAsNameArg) Get(_ []string) []string { + return []string{} +} + +var ( + // DontPass indicates that the given argument will not actually be + // rendered. 
+ DontPass Arg = dontPassArg{} + // PassAsName indicates that the given flag will be passed as `--key` + // without any value. + PassAsName Arg = passAsNameArg{} +) + +// AsStrings serializes this set of arguments to a slice of strings appropriate +// for passing to exec.Command and friends, making use of the given defaults +// as indicated for each particular argument. +// +// - Any flag in defaults that's not in Arguments will be present in the output +// - Any flag that's present in Arguments will be passed the corresponding +// defaults to do with as it will (ignore, append-to, suppress, etc). +func (a *Arguments) AsStrings(defaults map[string][]string) []string { + // sort for deterministic ordering + keysInOrder := make([]string, 0, len(defaults)+len(a.values)) + for key := range defaults { + if _, userSet := a.values[key]; userSet { + continue + } + keysInOrder = append(keysInOrder, key) + } + for key := range a.values { + keysInOrder = append(keysInOrder, key) + } + sort.Strings(keysInOrder) + + var res []string + for _, key := range keysInOrder { + vals := a.Get(key).Get(defaults[key]) + switch { + case vals == nil: // don't pass + continue + case len(vals) == 0: // pass as name + res = append(res, "--"+key) + default: + for _, val := range vals { + res = append(res, "--"+key+"="+val) + } + } + } + + return res +} + +// Get returns the value of the given flag. If nil, +// it will not be passed in AsString, otherwise: +// +// len == 0 --> `--key`, len > 0 --> `--key=val1 --key=val2 ...`. +func (a *Arguments) Get(key string) Arg { + if vals, ok := a.values[key]; ok { + return vals + } + return defaultedArg(nil) +} + +// Enable configures the given key to be passed as a "name-only" flag, +// like, `--key`. +func (a *Arguments) Enable(key string) *Arguments { + a.values[key] = PassAsName + return a +} + +// Disable prevents this flag from be passed. 
+func (a *Arguments) Disable(key string) *Arguments { + a.values[key] = DontPass + return a +} + +// Append adds additional values to this flag. If this flag has +// yet to be set, initial values will include defaults. If you want +// to intentionally ignore defaults/start from scratch, call AppendNoDefaults. +// +// Multiple values will look like `--key=value1 --key=value2 ...`. +func (a *Arguments) Append(key string, values ...string) *Arguments { + vals, present := a.values[key] + if !present { + vals = defaultedArg{} + } + a.values[key] = vals.Append(values...) + return a +} + +// AppendNoDefaults adds additional values to this flag. However, +// unlike Append, it will *not* copy values from defaults. +func (a *Arguments) AppendNoDefaults(key string, values ...string) *Arguments { + vals, present := a.values[key] + if !present { + vals = userArg{} + } + a.values[key] = vals.Append(values...) + return a +} + +// Set resets the given flag to the specified values, ignoring any existing +// values or defaults. +func (a *Arguments) Set(key string, values ...string) *Arguments { + a.values[key] = userArg(values) + return a +} + +// SetRaw sets the given flag to the given Arg value directly. Use this if +// you need to do some complicated deferred logic or something. +// +// Otherwise behaves like Set. +func (a *Arguments) SetRaw(key string, val Arg) *Arguments { + a.values[key] = val + return a +} + +// FuncArg is a basic implementation of Arg that can be used for custom argument logic, +// like pulling values out of APIServer, or dynamically calculating values just before +// launch. +// +// The given function will be mapped directly to Arg#Get, and will generally be +// used in conjunction with SetRaw. 
For example, to set `--some-flag` to the +// API server's CertDir, you could do: +// +// server.Configure().SetRaw("--some-flag", FuncArg(func(defaults []string) []string { +// return []string{server.CertDir} +// })) +// +// FuncArg ignores Appends; if you need to support appending values too, consider implementing +// Arg directly. +type FuncArg func([]string) []string + +// Append is a no-op for FuncArg, and just returns itself. +func (a FuncArg) Append(vals ...string) Arg { return a } + +// Get delegates functionality to the FuncArg function itself. +func (a FuncArg) Get(defaults []string) []string { + return a(defaults) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go new file mode 100644 index 00000000000..e1428aa6e5a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go @@ -0,0 +1,70 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "os" + "path/filepath" + "regexp" + "strings" +) + +const ( + // EnvAssetsPath is the environment variable that stores the global test + // binary location override. + EnvAssetsPath = "KUBEBUILDER_ASSETS" + // EnvAssetOverridePrefix is the environment variable prefix for per-binary + // location overrides. 
+ EnvAssetOverridePrefix = "TEST_ASSET_" + // AssetsDefaultPath is the default location to look for test binaries in, + // if no override was provided. + AssetsDefaultPath = "/usr/local/kubebuilder/bin" +) + +// BinPathFinder finds the path to the given named binary, using the following locations +// in order of precedence (highest first). Notice that the various env vars only need +// to be set -- the asset is not checked for existence on the filesystem. +// +// 1. TEST_ASSET_{tr/a-z-/A-Z_/} (if set; asset overrides -- EnvAssetOverridePrefix) +// 1. KUBEBUILDER_ASSETS (if set; global asset path -- EnvAssetsPath) +// 3. assetDirectory (if set; per-config asset directory) +// 4. /usr/local/kubebuilder/bin (AssetsDefaultPath). +func BinPathFinder(symbolicName, assetDirectory string) (binPath string) { + punctuationPattern := regexp.MustCompile("[^A-Z0-9]+") + sanitizedName := punctuationPattern.ReplaceAllString(strings.ToUpper(symbolicName), "_") + leadingNumberPattern := regexp.MustCompile("^[0-9]+") + sanitizedName = leadingNumberPattern.ReplaceAllString(sanitizedName, "") + envVar := EnvAssetOverridePrefix + sanitizedName + + // TEST_ASSET_XYZ + if val, ok := os.LookupEnv(envVar); ok { + return val + } + + // KUBEBUILDER_ASSETS + if val, ok := os.LookupEnv(EnvAssetsPath); ok { + return filepath.Join(val, symbolicName) + } + + // assetDirectory + if assetDirectory != "" { + return filepath.Join(assetDirectory, symbolicName) + } + + // default path + return filepath.Join(AssetsDefaultPath, symbolicName) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_other.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_other.go new file mode 100644 index 00000000000..df13b341a48 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_other.go @@ -0,0 +1,28 @@ +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos 
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import "syscall" + +// GetSysProcAttr returns the SysProcAttr to use for the process, +// for non-unix systems this returns nil. +func GetSysProcAttr() *syscall.SysProcAttr { + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_unix.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_unix.go new file mode 100644 index 00000000000..83ad509af0f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_unix.go @@ -0,0 +1,33 @@ +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package process + +import ( + "golang.org/x/sys/unix" +) + +// GetSysProcAttr returns the SysProcAttr to use for the process, +// for unix systems this returns a SysProcAttr with Setpgid set to true, +// which inherits the parent's process group id. +func GetSysProcAttr() *unix.SysProcAttr { + return &unix.SysProcAttr{ + Setpgid: true, + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go new file mode 100644 index 00000000000..03f252524a3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go @@ -0,0 +1,276 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "regexp" + "sync" + "syscall" + "time" +) + +// ListenAddr represents some listening address and port. +type ListenAddr struct { + Address string + Port string +} + +// URL returns a URL for this address with the given scheme and subpath. +func (l *ListenAddr) URL(scheme string, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: l.HostPort(), + Path: path, + } +} + +// HostPort returns the joined host-port pair for this address. 
+func (l *ListenAddr) HostPort() string { + return net.JoinHostPort(l.Address, l.Port) +} + +// HealthCheck describes the information needed to health-check a process via +// some health-check URL. +type HealthCheck struct { + url.URL + + // HealthCheckPollInterval is the interval which will be used for polling the + // endpoint described by Host, Port, and Path. + // + // If left empty it will default to 100 Milliseconds. + PollInterval time.Duration +} + +// State define the state of the process. +type State struct { + Cmd *exec.Cmd + + // HealthCheck describes how to check if this process is up. If we get an http.StatusOK, + // we assume the process is ready to operate. + // + // For example, the /healthz endpoint of the k8s API server, or the /health endpoint of etcd. + HealthCheck HealthCheck + + Args []string + + StopTimeout time.Duration + StartTimeout time.Duration + + Dir string + DirNeedsCleaning bool + Path string + + // ready holds whether the process is currently in ready state (hit the ready condition) or not. + // It will be set to true on a successful `Start()` and set to false on a successful `Stop()` + ready bool + + // waitDone is closed when our call to wait finishes up, and indicates that + // our process has terminated. + waitDone chan struct{} + errMu sync.Mutex + exitErr error + exited bool +} + +// Init sets up this process, configuring binary paths if missing, initializing +// temporary directories, etc. +// +// This defaults all defaultable fields. 
+func (ps *State) Init(name string) error { + if ps.Path == "" { + if name == "" { + return fmt.Errorf("must have at least one of name or path") + } + ps.Path = BinPathFinder(name, "") + } + + if ps.Dir == "" { + newDir, err := os.MkdirTemp("", "k8s_test_framework_") + if err != nil { + return err + } + ps.Dir = newDir + ps.DirNeedsCleaning = true + } + + if ps.StartTimeout == 0 { + ps.StartTimeout = 20 * time.Second + } + + if ps.StopTimeout == 0 { + ps.StopTimeout = 20 * time.Second + } + return nil +} + +type stopChannel chan struct{} + +// CheckFlag checks the help output of this command for the presence of the given flag, specified +// without the leading `--` (e.g. `CheckFlag("insecure-port")` checks for `--insecure-port`), +// returning true if the flag is present. +func (ps *State) CheckFlag(flag string) (bool, error) { + cmd := exec.Command(ps.Path, "--help") + outContents, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("unable to run command %q to check for flag %q: %w", ps.Path, flag, err) + } + pat := `(?m)^\s*--` + flag + `\b` // (m --> multi-line --> ^ matches start of line) + matched, err := regexp.Match(pat, outContents) + if err != nil { + return false, fmt.Errorf("unable to check command %q for flag %q in help output: %w", ps.Path, flag, err) + } + return matched, nil +} + +// Start starts the apiserver, waits for it to come up, and returns an error, +// if occurred. +func (ps *State) Start(stdout, stderr io.Writer) (err error) { + if ps.ready { + return nil + } + + ps.Cmd = exec.Command(ps.Path, ps.Args...) 
+ ps.Cmd.Stdout = stdout + ps.Cmd.Stderr = stderr + ps.Cmd.SysProcAttr = GetSysProcAttr() + + ready := make(chan bool) + timedOut := time.After(ps.StartTimeout) + pollerStopCh := make(stopChannel) + go pollURLUntilOK(ps.HealthCheck.URL, ps.HealthCheck.PollInterval, ready, pollerStopCh) + + ps.waitDone = make(chan struct{}) + + if err := ps.Cmd.Start(); err != nil { + ps.errMu.Lock() + defer ps.errMu.Unlock() + ps.exited = true + return err + } + go func() { + defer close(ps.waitDone) + err := ps.Cmd.Wait() + + ps.errMu.Lock() + defer ps.errMu.Unlock() + ps.exitErr = err + ps.exited = true + }() + + select { + case <-ready: + ps.ready = true + return nil + case <-ps.waitDone: + close(pollerStopCh) + return fmt.Errorf("timeout waiting for process %s to start successfully "+ + "(it may have failed to start, or stopped unexpectedly before becoming ready)", + path.Base(ps.Path)) + case <-timedOut: + close(pollerStopCh) + if ps.Cmd != nil { + // intentionally ignore this -- we might've crashed, failed to start, etc + ps.Cmd.Process.Signal(syscall.SIGTERM) //nolint:errcheck + } + return fmt.Errorf("timeout waiting for process %s to start", path.Base(ps.Path)) + } +} + +// Exited returns true if the process exited, and may also +// return an error (as per Cmd.Wait) if the process did not +// exit with error code 0. 
+func (ps *State) Exited() (bool, error) { + ps.errMu.Lock() + defer ps.errMu.Unlock() + return ps.exited, ps.exitErr +} + +func pollURLUntilOK(url url.URL, interval time.Duration, ready chan bool, stopCh stopChannel) { + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + // there's probably certs *somewhere*, + // but it's fine to just skip validating + // them for health checks during testing + InsecureSkipVerify: true, //nolint:gosec + }, + }, + } + if interval <= 0 { + interval = 100 * time.Millisecond + } + for { + res, err := client.Get(url.String()) + if err == nil { + res.Body.Close() + if res.StatusCode == http.StatusOK { + ready <- true + return + } + } + + select { + case <-stopCh: + return + default: + time.Sleep(interval) + } + } +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the CertDir if necessary. +func (ps *State) Stop() error { + // Always clear the directory if we need to. + defer func() { + if ps.DirNeedsCleaning { + _ = os.RemoveAll(ps.Dir) + } + }() + if ps.Cmd == nil { + return nil + } + if done, _ := ps.Exited(); done { + return nil + } + if err := ps.Cmd.Process.Signal(syscall.SIGTERM); err != nil { + return fmt.Errorf("unable to signal for process %s to stop: %w", ps.Path, err) + } + + timedOut := time.After(ps.StopTimeout) + + select { + case <-ps.waitDone: + break + case <-timedOut: + if err := ps.Cmd.Process.Signal(syscall.SIGKILL); err != nil { + return fmt.Errorf("unable to kill process %s: %w", ps.Path, err) + } + return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path)) + } + ps.ready = false + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/LICENSE b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 
2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md new file mode 100644 index 00000000000..c03a4340378 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md @@ -0,0 +1,119 @@ +# Envtest Binaries Manager + +This is a small tool that manages binaries for envtest. It can be used to +download new binaries, list currently installed and available ones, and +clean up versions. 
+
+To use it, just go-install it with Golang 1.22+ (it's a separate, self-contained
+module):
+
+```shell
+go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
+```
+
+If you are using Golang 1.20 or 1.21, use the `release-0.17` branch instead:
+
+```shell
+go install sigs.k8s.io/controller-runtime/tools/setup-envtest@release-0.17
+```
+
+For full documentation, run it with the `--help` flag, but here are some
+examples:
+
+```shell
+# download the latest envtest, and print out info about it
+setup-envtest use
+
+# download the latest 1.19 envtest, and print out the path
+setup-envtest use -p path 1.19.x!
+
+# switch to the most recent 1.21 envtest on disk
+source <(setup-envtest use -i -p env 1.21.x)
+
+# list all available local versions for darwin/amd64
+setup-envtest list -i --os darwin --arch amd64
+
+# remove all versions older than 1.16 from disk
+setup-envtest cleanup <1.16
+
+# use the value from $KUBEBUILDER_ASSETS if set, otherwise follow the normal
+# logic for 'use'
+setup-envtest --use-env
+
+# use the value from $KUBEBUILDER_ASSETS if set, otherwise use the latest
+# installed version
+setup-envtest use -i --use-env
+
+# sideload a pre-downloaded tarball as Kubernetes 1.16.2 into our store
+setup-envtest sideload 1.16.2 < downloaded-envtest.tar.gz
+
+# Per default envtest binaries are downloaded from:
+# https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/envtest-releases.yaml
+# To download from a custom index use the following:
+setup-envtest use --index https://custom.com/envtest-releases.yaml
+
+```
+
+## Where does it put all those binaries?
+
+By default, binaries are stored in a subdirectory of an OS-specific data
+directory, as per the OS's conventions.
+
+On Linux, this is `$XDG_DATA_HOME`; on Windows, `%LocalAppData%`; and on
+OSX, `~/Library/Application Support`.
+
+There's an overall folder that holds all files, and inside that is
+a folder for each version/platform pair. 
The exact directory structure is
+not guaranteed, except that the leaf directory will contain the names
+expected by envtest. You should always use `setup-envtest fetch` or
+`setup-envtest switch` (generally with the `-p path` or `-p env` flags) to
+get the directory that you should use.
+
+## Why do I have to do that `source <(blah blah blah)` thing
+
+This is a normal binary, not a shell script, so we can't set the parent
+process's environment variables. If you use this by hand a lot and want
+to save the typing, you could put something like the following in your
+`~/.zshrc` (or similar for bash/fish/whatever, modified to those):
+
+```shell
+setup-envtest() {
+  if (($@[(Ie)use])); then
+    source <($GOPATH/bin/setup-envtest "$@" -p env)
+  else
+    $GOPATH/bin/setup-envtest "$@"
+  fi
+}
+```
+
+## What if I don't want to talk to the internet?
+
+There are a few options.
+
+First, you'll probably want to set the `-i/--installed` flag. If you want
+to avoid forgetting to set this flag, set the `ENVTEST_INSTALLED_ONLY`
+env variable, which will switch that flag on by default.
+
+Then, you have a few options for managing your binaries:
+
+- If you don't *really* want to manage with this tool, or you want to
+  respect the $KUBEBUILDER_ASSETS variable if it's set to something
+  outside the store, use the `use --use-env -i` command.
+
+  `--use-env` makes the command unconditionally use the value of
+  KUBEBUILDER_ASSETS as long as it contains the required binaries, and
+  `-i` indicates that we only ever want to work with installed binaries.
+
+  As noted above, you can use `ENVTEST_INSTALLED_ONLY=true` to switch `-i`
+  on by default, and you can use `ENVTEST_USE_ENV=true` to switch
+  `--use-env` on by default.
+
+- If you want to use this tool, but download your gzipped tarballs
+  separately, you can use the `sideload` command. You'll need to use the
+  `-k/--version` flag to indicate which version you're sideloading. 
+ + After that, it'll be as if you'd installed the binaries with `use`. + +- If you want to talk to some internal source via HTTP, you can simply set `--index` + The index must contain references to envtest binary archives in the same format as: + https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/envtest-releases.yaml diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/env.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/env.go new file mode 100644 index 00000000000..6168739eb60 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/env.go @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "path/filepath" + "sort" + "strings" + "text/tabwriter" + + "github.com/go-logr/logr" + "github.com/spf13/afero" // too bad fs.FS isn't writable :-/ + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// Env represents an environment for downloading and otherwise manipulating +// envtest binaries. +// +// In general, the methods will use the Exit{,Cause} functions from this package +// to indicate errors. Catch them with a `defer HandleExitWithCode()`. +type Env struct { + // the following *must* be set on input + + // Platform is our current platform + Platform versions.PlatformItem + + // VerifySum indicates whether we should run checksums. + VerifySum bool + // NoDownload forces us to not contact remote services, + // looking only at local files instead. + NoDownload bool + // ForceDownload forces us to ignore local files and always + // contact remote services & re-download. + ForceDownload bool + + // Client is our remote client for contacting remote services. + Client remote.Client + + // Log allows us to log. 
+ Log logr.Logger + + // the following *may* be set on input, or may be discovered + + // Version is the version(s) that we want to download + // (may be automatically retrieved later on). + Version versions.Spec + + // Store is used to load/store entries to/from disk. + Store *store.Store + + // FS is the file system to read from/write to for provisioning temp files + // for storing the archives temporarily. + FS afero.Afero + + // Out is the place to write output text to + Out io.Writer + + // manualPath is the manually discovered path from PathMatches, if + // a non-store path was used. It'll be printed by PrintInfo if present. + manualPath string +} + +// CheckCoherence checks that this environment has filled-out, coherent settings +// (e.g. NoDownload & ForceDownload aren't both set). +func (e *Env) CheckCoherence() { + if e.NoDownload && e.ForceDownload { + Exit(2, "cannot both skip downloading *and* force re-downloading") + } + + if e.Platform.OS == "" || e.Platform.Arch == "" { + Exit(2, "must specify non-empty OS and arch (did you specify bad --os or --arch values?)") + } +} + +func (e *Env) filter() store.Filter { + return store.Filter{Version: e.Version, Platform: e.Platform.Platform} +} + +func (e *Env) item() store.Item { + concreteVer := e.Version.AsConcrete() + if concreteVer == nil || e.Platform.IsWildcard() { + panic("no platform/version set") // unexpected, print stack trace + } + return store.Item{Version: *concreteVer, Platform: e.Platform.Platform} +} + +// ListVersions prints out all available versions matching this Env's +// platform & version selector (respecting NoDownload to figure +// out whether or not to match remote versions). 
+func (e *Env) ListVersions(ctx context.Context) { + out := tabwriter.NewWriter(e.Out, 4, 4, 2, ' ', 0) + defer out.Flush() + localVersions, err := e.Store.List(ctx, e.filter()) + if err != nil { + ExitCause(2, err, "unable to list installed versions") + } + for _, item := range localVersions { + // already filtered by onDiskVersions + fmt.Fprintf(out, "(installed)\tv%s\t%s\n", item.Version, item.Platform) + } + + if e.NoDownload { + return + } + + remoteVersions, err := e.Client.ListVersions(ctx) + if err != nil { + ExitCause(2, err, "unable list to available versions") + } + + for _, set := range remoteVersions { + if !e.Version.Matches(set.Version) { + continue + } + sort.Slice(set.Platforms, func(i, j int) bool { + return orderPlatforms(set.Platforms[i].Platform, set.Platforms[j].Platform) + }) + for _, plat := range set.Platforms { + if e.Platform.Matches(plat.Platform) { + fmt.Fprintf(out, "(available)\tv%s\t%s\n", set.Version, plat) + } + } + } +} + +// LatestVersion returns the latest version matching our version selector and +// platform from the remote server, with the corresponding checksum for later +// use as well. 
+func (e *Env) LatestVersion(ctx context.Context) (versions.Concrete, versions.PlatformItem) { + vers, err := e.Client.ListVersions(ctx) + if err != nil { + ExitCause(2, err, "unable to list versions to find latest one") + } + for _, set := range vers { + if !e.Version.Matches(set.Version) { + e.Log.V(1).Info("skipping non-matching version", "version", set.Version) + continue + } + // double-check that our platform is supported + for _, plat := range set.Platforms { + // NB(directxman12): we're already iterating in order, so no + // need to check if the wildcard is latest vs any + if e.Platform.Matches(plat.Platform) && e.Version.Matches(set.Version) { + return set.Version, plat + } + } + e.Log.Info("latest version not supported for your platform, checking older ones", "version", set.Version, "platform", e.Platform) + } + + Exit(2, "unable to find a version that was supported for platform %s", e.Platform) + return versions.Concrete{}, versions.PlatformItem{} // unreachable, but Go's type system can't express the "never" type +} + +// ExistsAndValid checks if our current (concrete) version & platform +// exist on disk (unless ForceDownload is set, in which cause it always +// returns false). +// +// Must be called after EnsureVersionIsSet so that we have a concrete +// Version selected. Must have a concrete platform, or ForceDownload +// must be set. +func (e *Env) ExistsAndValid() bool { + if e.ForceDownload { + // we always want to download, so don't check here + return false + } + + if e.Platform.IsWildcard() { + Exit(2, "you must have a concrete platform with this command -- you cannot use wildcard platforms with fetch or switch") + } + + exists, err := e.Store.Has(e.item()) + if err != nil { + ExitCause(2, err, "unable to check if existing version exists") + } + + if exists { + e.Log.Info("applicable version found on disk", "version", e.Version) + } + return exists +} + +// EnsureVersionIsSet ensures that we have a non-wildcard version +// configured. 
+// +// If necessary, it will enumerate on-disk and remote versions to accomplish +// this, finding a version that matches our version selector and platform. +// It will always yield a concrete version, it *may* yield a concrete platform +// as well. +func (e *Env) EnsureVersionIsSet(ctx context.Context) { + if e.Version.AsConcrete() != nil { + return + } + var localVer *versions.Concrete + var localPlat versions.Platform + + items, err := e.Store.List(ctx, e.filter()) + if err != nil { + ExitCause(2, err, "unable to determine installed versions") + } + + for _, item := range items { + if !e.Version.Matches(item.Version) || !e.Platform.Matches(item.Platform) { + e.Log.V(1).Info("skipping version, doesn't match", "version", item.Version, "platform", item.Platform) + continue + } + // NB(directxman12): we're already iterating in order, so no + // need to check if the wildcard is latest vs any + ver := item.Version // copy to avoid referencing iteration variable + localVer = &ver + localPlat = item.Platform + break + } + + if e.NoDownload || !e.Version.CheckLatest { + // no version specified, but we either + // + // a) shouldn't contact remote + // b) don't care to find the absolute latest + // + // so just find the latest local version + if localVer != nil { + e.Version.MakeConcrete(*localVer) + e.Platform.Platform = localPlat + return + } + if e.NoDownload { + Exit(2, "no applicable on-disk versions for %s found, you'll have to download one, or run list -i to see what you do have", e.Platform) + } + // if we didn't ask for the latest version, but don't have anything + // available, try the internet ;-) + } + + // no version specified and we need the latest in some capacity, so find latest from remote + // so find the latest local first, then compare it to the latest remote, and use whichever + // of the two is more recent. 
+ e.Log.Info("no version specified, finding latest") + serverVer, platform := e.LatestVersion(ctx) + + // if we're not forcing a download, and we have a newer local version, just use that + if !e.ForceDownload && localVer != nil && localVer.NewerThan(serverVer) { + e.Platform.Platform = localPlat // update our data with hash + e.Version.MakeConcrete(*localVer) + return + } + + // otherwise, use the new version from the server + e.Platform = platform // update our data with hash + e.Version.MakeConcrete(serverVer) +} + +// Fetch ensures that the requested platform and version are on disk. +// You must call EnsureVersionIsSet before calling this method. +// +// If ForceDownload is set, we always download, otherwise we only download +// if we're missing the version on disk. +func (e *Env) Fetch(ctx context.Context) { + log := e.Log.WithName("fetch") + + // if we didn't just fetch it, grab the sum to verify + if e.VerifySum && e.Platform.Hash == nil { + if err := e.Client.FetchSum(ctx, *e.Version.AsConcrete(), &e.Platform); err != nil { + ExitCause(2, err, "unable to fetch hash for requested version") + } + } + if !e.VerifySum { + e.Platform.Hash = nil // skip verification + } + + var packedPath string + + // cleanup on error (needs to be here so it will happen after the other defers) + defer e.cleanupOnError(func() { + if packedPath != "" { + e.Log.V(1).Info("cleaning up downloaded archive", "path", packedPath) + if err := e.FS.Remove(packedPath); err != nil && !errors.Is(err, fs.ErrNotExist) { + e.Log.Error(err, "unable to clean up archive path", "path", packedPath) + } + } + }) + + archiveOut, err := e.FS.TempFile("", "*-"+e.Platform.ArchiveName(*e.Version.AsConcrete())) + if err != nil { + ExitCause(2, err, "unable to open file to write downloaded archive to") + } + defer archiveOut.Close() + packedPath = archiveOut.Name() + log.V(1).Info("writing downloaded archive", "path", packedPath) + + if err := e.Client.GetVersion(ctx, *e.Version.AsConcrete(), e.Platform, 
archiveOut); err != nil { + ExitCause(2, err, "unable to download requested version") + } + log.V(1).Info("downloaded archive", "path", packedPath) + + if err := archiveOut.Sync(); err != nil { // sync before reading back + ExitCause(2, err, "unable to flush downloaded archive file") + } + if _, err := archiveOut.Seek(0, 0); err != nil { + ExitCause(2, err, "unable to jump back to beginning of archive file to unzip") + } + + if err := e.Store.Add(ctx, e.item(), archiveOut); err != nil { + ExitCause(2, err, "unable to store version to disk") + } + + log.V(1).Info("removing archive from disk", "path", packedPath) + if err := e.FS.Remove(packedPath); err != nil { + // don't bail, this isn't fatal + log.Error(err, "unable to remove downloaded archive", "path", packedPath) + } +} + +// cleanup on error cleans up if we hit an exitCode error. +// +// Use it in a defer. +func (e *Env) cleanupOnError(extraCleanup func()) { + cause := recover() + if cause == nil { + return + } + // don't panic in a panic handler + var exit *exitCode + if asExit(cause, &exit) && exit.code != 0 { + e.Log.Info("cleaning up due to error") + // we already log in the function, and don't want to panic, so + // ignore the error + extraCleanup() + } + panic(cause) // re-start the panic now that we're done +} + +// Remove removes the data for our version selector & platform from disk. +func (e *Env) Remove(ctx context.Context) { + items, err := e.Store.Remove(ctx, e.filter()) + for _, item := range items { + fmt.Fprintf(e.Out, "removed %s\n", item) + } + if err != nil { + ExitCause(2, err, "unable to remove all requested version(s)") + } +} + +// PrintInfo prints out information about a single, current version +// and platform, according to the given formatting info. 
+func (e *Env) PrintInfo(printFmt PrintFormat) { + // use the manual path if it's set, otherwise use the standard path + path := e.manualPath + if e.manualPath == "" { + item := e.item() + var err error + path, err = e.Store.Path(item) + if err != nil { + ExitCause(2, err, "unable to get path for version %s", item) + } + } + switch printFmt { + case PrintOverview: + fmt.Fprintf(e.Out, "Version: %s\n", e.Version) + fmt.Fprintf(e.Out, "OS/Arch: %s\n", e.Platform) + if e.Platform.Hash != nil { + fmt.Fprintf(e.Out, "%s: %s\n", e.Platform.Hash.Type, e.Platform.Hash.Value) + } + fmt.Fprintf(e.Out, "Path: %s\n", path) + case PrintPath: + fmt.Fprint(e.Out, path) // NB(directxman12): no newline -- want the bare path here + case PrintEnv: + // quote in case there are spaces, etc in the path + // the weird string below works like this: + // - you can't escape quotes in shell + // - shell strings that are next to each other are concatenated (so "a""b""c" == "abc") + // - you can intermix quote styles using the above + // - so `'"'"'` --> CLOSE_QUOTE + "'" + OPEN_QUOTE + shellQuoted := strings.ReplaceAll(path, "'", `'"'"'`) + fmt.Fprintf(e.Out, "export KUBEBUILDER_ASSETS='%s'\n", shellQuoted) + default: + panic(fmt.Sprintf("unexpected print format %v", printFmt)) + } +} + +// EnsureBaseDirs ensures that the base packed and unpacked directories +// exist. +// +// This should be the first thing called after CheckCoherence. +func (e *Env) EnsureBaseDirs(ctx context.Context) { + if err := e.Store.Initialize(ctx); err != nil { + ExitCause(2, err, "unable to make sure store is initialized") + } +} + +// Sideload takes an input stream, and loads it as if it had been a downloaded .tar.gz file +// for the current *concrete* version and platform. +func (e *Env) Sideload(ctx context.Context, input io.Reader) { + log := e.Log.WithName("sideload") + if e.Version.AsConcrete() == nil || e.Platform.IsWildcard() { + Exit(2, "must specify a concrete version and platform to sideload. 
Make sure you've passed a version, like 'sideload 1.21.0'") + } + log.V(1).Info("sideloading from input stream to version", "version", e.Version, "platform", e.Platform) + if err := e.Store.Add(ctx, e.item(), input); err != nil { + ExitCause(2, err, "unable to sideload item to disk") + } +} + +var ( + // expectedExecutables are the executables that are checked in PathMatches + // for non-store paths. + expectedExecutables = []string{ + "kube-apiserver", + "etcd", + "kubectl", + } +) + +// PathMatches checks if the path (e.g. from the environment variable) +// matches this version & platform selector, and if so, returns true. +func (e *Env) PathMatches(value string) bool { + e.Log.V(1).Info("checking if (env var) path represents our desired version", "path", value) + if value == "" { + // if we're unset, + return false + } + + if e.versionFromPathName(value) { + e.Log.V(1).Info("path appears to be in our store, using that info", "path", value) + return true + } + + e.Log.V(1).Info("path is not in our store, checking for binaries", "path", value) + for _, expected := range expectedExecutables { + _, err := e.FS.Stat(filepath.Join(value, expected)) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // one of our required binaries is missing, return false + e.Log.V(1).Info("missing required binary in (env var) path", "binary", expected, "path", value) + return false + } + ExitCause(2, err, "unable to check for existence of binary %s from existing (env var) path %s", value, expected) + } + } + + // success, all binaries present + e.Log.V(1).Info("all required binaries present in (env var) path, using that", "path", value) + + // don't bother checking the version, the user explicitly asked us to use this + // we don't know the version, so set it to wildcard + e.Version = versions.AnyVersion + e.Platform.OS = "*" + e.Platform.Arch = "*" + e.manualPath = value + return true +} + +// versionFromPathName checks if the given path's last component looks like one +// of 
our versions, and, if so, what version it represents. If successful, +// it'll set version and platform, and return true. Otherwise it returns +// false. +func (e *Env) versionFromPathName(value string) bool { + baseName := filepath.Base(value) + ver, pl := versions.ExtractWithPlatform(versions.VersionPlatformRE, baseName) + if ver == nil { + // not a version that we can tell + return false + } + + // yay we got a version! + e.Version.MakeConcrete(*ver) + e.Platform.Platform = pl + e.manualPath = value // might be outside our store, set this just in case + + return true +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/exit.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/exit.go new file mode 100644 index 00000000000..ae393b593b1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/exit.go @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "errors" + "fmt" + "os" +) + +// Exit exits with the given code and error message. +// +// Defer HandleExitWithCode in main to catch this and get the right behavior. +func Exit(code int, msg string, args ...interface{}) { + panic(&exitCode{ + code: code, + err: fmt.Errorf(msg, args...), + }) +} + +// ExitCause exits with the given code and error message, automatically +// wrapping the underlying error passed as well. +// +// Defer HandleExitWithCode in main to catch this and get the right behavior. +func ExitCause(code int, err error, msg string, args ...interface{}) { + args = append(args, err) + panic(&exitCode{ + code: code, + err: fmt.Errorf(msg+": %w", args...), + }) +} + +// exitCode is an error that indicates, on a panic, to exit with the given code +// and message. 
+type exitCode struct { + code int + err error +} + +func (c *exitCode) Error() string { + return fmt.Sprintf("%v (exit code %d)", c.err, c.code) +} +func (c *exitCode) Unwrap() error { + return c.err +} + +// asExit checks if the given (panic) value is an exitCode error, +// and if so stores it in the given pointer. It's roughly analogous +// to errors.As, except it works on recover() values. +func asExit(val interface{}, exit **exitCode) bool { + if val == nil { + return false + } + err, isErr := val.(error) + if !isErr { + return false + } + if !errors.As(err, exit) { + return false + } + return true +} + +// HandleExitWithCode handles panics of type exitCode, +// printing the status message and existing with the given +// exit code, or re-raising if not an exitCode error. +// +// This should be the first defer in your main function. +func HandleExitWithCode() { + if cause := recover(); CheckRecover(cause, func(code int, err error) { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(code) + }) { + panic(cause) + } +} + +// CheckRecover checks the value of cause, calling the given callback +// if it's an exitCode error. It returns true if we should re-panic +// the cause. +// +// It's mainly useful for testing, normally you'd use HandleExitWithCode. 
+func CheckRecover(cause interface{}, cb func(int, error)) bool { + if cause == nil { + return false + } + var exitErr *exitCode + if !asExit(cause, &exitErr) { + // re-raise if it's not an exit error + return true + } + + cb(exitErr.code, exitErr.err) + return false +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/helpers.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/helpers.go new file mode 100644 index 00000000000..2c98c88d959 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/helpers.go @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "fmt" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// orderPlatforms orders platforms by OS then arch. +func orderPlatforms(first, second versions.Platform) bool { + // sort by OS, then arch + if first.OS != second.OS { + return first.OS < second.OS + } + return first.Arch < second.Arch +} + +// PrintFormat indicates how to print out fetch and switch results. +// It's a valid pflag.Value so it can be used as a flag directly. +type PrintFormat int + +const ( + // PrintOverview prints human-readable data, + // including path, version, arch, and checksum (when available). + PrintOverview PrintFormat = iota + // PrintPath prints *only* the path, with no decoration. + PrintPath + // PrintEnv prints the path with the corresponding env variable, so that + // you can source the output like + // `source $(fetch-envtest switch -p env 1.20.x)`. + PrintEnv +) + +func (f PrintFormat) String() string { + switch f { + case PrintOverview: + return "overview" + case PrintPath: + return "path" + case PrintEnv: + return "env" + default: + panic(fmt.Sprintf("unexpected print format %d", int(f))) + } +} + +// Set sets the value of this as a flag. 
+func (f *PrintFormat) Set(val string) error { + switch val { + case "overview": + *f = PrintOverview + case "path": + *f = PrintPath + case "env": + *f = PrintEnv + default: + return fmt.Errorf("unknown print format %q, use one of overview|path|env", val) + } + return nil +} + +// Type is the type of this value as a flag. +func (PrintFormat) Type() string { + return "{overview|path|env}" +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/main.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/main.go new file mode 100644 index 00000000000..3121e206fde --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/main.go @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package main + +import ( + goflag "flag" + "fmt" + "os" + "runtime" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "github.com/spf13/afero" + flag "github.com/spf13/pflag" + "go.uber.org/zap" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows" +) + +const ( + // envNoDownload is an env variable that can be set to always force + // the --installed-only, -i flag to be set. + envNoDownload = "ENVTEST_INSTALLED_ONLY" + // envUseEnv is an env variable that can be set to control the --use-env + // flag globally. 
+ envUseEnv = "ENVTEST_USE_ENV" +) + +var ( + force = flag.Bool("force", false, "force re-downloading dependencies, even if they're already present and correct") + installedOnly = flag.BoolP("installed-only", "i", os.Getenv(envNoDownload) != "", + "only look at installed versions -- do not query the remote API server, "+ + "and error out if it would be necessary to") + verify = flag.Bool("verify", true, "verify dependencies while downloading") + useEnv = flag.Bool("use-env", os.Getenv(envUseEnv) != "", "whether to return the value of KUBEBUILDER_ASSETS if it's already set") + + targetOS = flag.String("os", runtime.GOOS, "os to download for (e.g. linux, darwin, for listing operations, use '*' to list all platforms)") + targetArch = flag.String("arch", runtime.GOARCH, "architecture to download for (e.g. amd64, for listing operations, use '*' to list all platforms)") + + // printFormat is the flag value for -p, --print. + printFormat = envp.PrintOverview + // zapLvl is the flag value for logging verbosity. + zapLvl = zap.WarnLevel + + binDir = flag.String("bin-dir", "", + "directory to store binary assets (default: $OS_SPECIFIC_DATA_DIR/envtest-binaries)") + + index = flag.String("index", remote.DefaultIndexURL, "index to discover envtest binaries") +) + +// TODO(directxman12): handle interrupts? + +// setupLogging configures a Zap logger. +func setupLogging() logr.Logger { + logCfg := zap.NewDevelopmentConfig() + logCfg.Level = zap.NewAtomicLevelAt(zapLvl) + zapLog, err := logCfg.Build() + if err != nil { + envp.ExitCause(1, err, "who logs the logger errors?") + } + return zapr.NewLogger(zapLog) +} + +// setupEnv initializes the environment from flags. 
+func setupEnv(globalLog logr.Logger, version string) *envp.Env { + log := globalLog.WithName("setup") + if *binDir == "" { + dataDir, err := store.DefaultStoreDir() + if err != nil { + envp.ExitCause(1, err, "unable to deterimine default binaries directory (use --bin-dir to manually override)") + } + + *binDir = dataDir + } + log.V(1).Info("using binaries directory", "dir", *binDir) + + client := &remote.HTTPClient{ + Log: globalLog.WithName("storage-client"), + IndexURL: *index, + } + log.V(1).Info("using HTTP client", "index", *index) + + env := &envp.Env{ + Log: globalLog, + Client: client, + VerifySum: *verify, + ForceDownload: *force, + NoDownload: *installedOnly, + Platform: versions.PlatformItem{ + Platform: versions.Platform{ + OS: *targetOS, + Arch: *targetArch, + }, + }, + FS: afero.Afero{Fs: afero.NewOsFs()}, + Store: store.NewAt(*binDir), + Out: os.Stdout, + } + + switch version { + case "", "latest": + env.Version = versions.LatestVersion + case "latest-on-disk": + // we sort by version, latest first, so this'll give us the latest on + // disk (as per the contract from env.List & store.List) + env.Version = versions.AnyVersion + env.NoDownload = true + default: + var err error + env.Version, err = versions.FromExpr(version) + if err != nil { + envp.ExitCause(1, err, "version be a valid version, or simply 'latest' or 'latest-on-disk'") + } + } + + env.CheckCoherence() + + return env +} + +func main() { + // exit with appropriate error codes -- this should be the first defer so + // that it's the last one executed. + defer envp.HandleExitWithCode() + + // set up flags + flag.Usage = func() { + name := os.Args[0] + fmt.Fprintf(os.Stderr, "Usage: %s [FLAGS] use|list|cleanup|sideload [VERSION]\n", name) + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, + ` +Note: this command is currently alpha, and the usage/behavior may change from release to release. 
+ +Examples: + + # download the latest envtest, and print out info about it + %[1]s use + + # download the latest 1.19 envtest, and print out the path + %[1]s use -p path 1.19.x! + + # switch to the most recent 1.21 envtest on disk + source <(%[1]s use -i -p env 1.21.x) + + # list all available local versions for darwin/amd64 + %[1]s list -i --os darwin --arch amd64 + + # remove all versions older than 1.16 from disk + %[1]s cleanup <1.16 + + # use the value from $KUBEBUILDER_ASSETS if set, otherwise follow the normal + # logic for 'use' + %[1]s --use-env + + # use the value from $KUBEBUILDER_ASSETS if set, otherwise use the latest + # installed version + %[1]s use -i --use-env + + # sideload a pre-downloaded tarball as Kubernetes 1.16.2 into our store + %[1]s sideload 1.16.2 < downloaded-envtest.tar.gz + +Commands: + + use: + get information for the requested version, downloading it if necessary and allowed. + Needs a concrete platform (no wildcards), but wildcard versions are supported. + + list: + list installed *and* available versions matching the given version & platform. + May have wildcard versions *and* platforms. + If the -i flag is passed, only installed versions are listed. + + cleanup: + remove all versions matching the given version & platform selector. + May have wildcard versions *and* platforms. + + sideload: + reads a .tar.gz file from stdin and expand it into the store. + must have a concrete version and platform. + +Versions: + + Versions take the form of a small subset of semver selectors. + + Basic semver whole versions are accepted: X.Y.Z. + Z may also be '*' or 'x' to match a wildcard. + You may also just write X.Y, which means X.Y.*. + + A version may be prefixed with '~' to match the most recent Z release + in the given Y release ( [X.Y.Z, X.Y+1.0) ). + + Finally, you may suffix the version with '!' to force checking the + remote API server for the latest version. 
+ + For example: + + 1.16.x / 1.16.* / 1.16 # any 1.16 version + ~1.19.3 # any 1.19 version that's at least 1.19.3 + <1.17 # any release 1.17.x or below + 1.22.x! # the latest one 1.22 release available remotely + +Output: + + The fetch & switch commands respect the --print, -p flag. + + overview: human readable information + path: print out the path, by itself + env: print out the path in a form that can be sourced to use that version with envtest + + Other command have human-readable output formats only. + +Environment Variables: + + KUBEBUILDER_ASSETS: + --use-env will check this, and '-p/--print env' will return this. + If --use-env is true and this is set, we won't check our store + for versions -- we'll just immediately return whatever's in + this env var. + + %[2]s: + will switch the default of -i/--installed to true if set to any value + + %[3]s: + will switch the default of --use-env to true if set to any value + +`, name, envNoDownload, envUseEnv) + } + flag.CommandLine.AddGoFlag(&goflag.Flag{Name: "v", Usage: "logging level", Value: &zapLvl}) + flag.VarP(&printFormat, "print", "p", "what info to print after fetch-style commands (overview, path, env)") + needHelp := flag.Bool("help", false, "print out this help text") // register help so that we don't get an error at the end + flag.Parse() + + if *needHelp { + flag.Usage() + envp.Exit(2, "") + } + + // check our argument count + if numArgs := flag.NArg(); numArgs < 1 || numArgs > 2 { + flag.Usage() + envp.Exit(2, "please specify a command to use, and optionally a version selector") + } + + // set up logging + globalLog := setupLogging() + + // set up the environment + var version string + if flag.NArg() > 1 { + version = flag.Arg(1) + } + env := setupEnv(globalLog, version) + + // perform our main set of actions + switch action := flag.Arg(0); action { + case "use": + workflows.Use{ + UseEnv: *useEnv, + PrintFormat: printFormat, + AssetsPath: os.Getenv("KUBEBUILDER_ASSETS"), + }.Do(env) + case "list": + 
workflows.List{}.Do(env) + case "cleanup": + workflows.Cleanup{}.Do(env) + case "sideload": + workflows.Sideload{ + Input: os.Stdin, + PrintFormat: printFormat, + }.Do(env) + default: + flag.Usage() + envp.Exit(2, "unknown action %q", action) + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/client.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/client.go new file mode 100644 index 00000000000..24efd6daffe --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/client.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 The Kubernetes Authors + +package remote + +import ( + "context" + "io" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// Client is an interface to get and list envtest binary archives. +type Client interface { + ListVersions(ctx context.Context) ([]versions.Set, error) + + GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error + + FetchSum(ctx context.Context, ver versions.Concrete, pl *versions.PlatformItem) error +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/http_client.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/http_client.go new file mode 100644 index 00000000000..0339654a82e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/http_client.go @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package remote + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "sort" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + "sigs.k8s.io/yaml" +) + +// DefaultIndexURL is the default index used in HTTPClient. 
+var DefaultIndexURL = "https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/HEAD/envtest-releases.yaml" + +var _ Client = &HTTPClient{} + +// HTTPClient is a client for fetching versions of the envtest binary archives +// from an index via HTTP. +type HTTPClient struct { + // Log allows us to log. + Log logr.Logger + + // IndexURL is the URL of the index, defaults to DefaultIndexURL. + IndexURL string +} + +// Index represents an index of envtest binary archives. Example: +// +// releases: +// v1.28.0: +// envtest-v1.28.0-darwin-amd64.tar.gz: +// hash: +// selfLink: +type Index struct { + // Releases maps Kubernetes versions to Releases (envtest archives). + Releases map[string]Release `json:"releases"` +} + +// Release maps an archive name to an archive. +type Release map[string]Archive + +// Archive contains the self link to an archive and its hash. +type Archive struct { + Hash string `json:"hash"` + SelfLink string `json:"selfLink"` +} + +// ListVersions lists all available tools versions in the index, along +// with supported os/arch combos and the corresponding hash. +// +// The results are sorted with newer versions first. 
+func (c *HTTPClient) ListVersions(ctx context.Context) ([]versions.Set, error) { + index, err := c.getIndex(ctx) + if err != nil { + return nil, err + } + + knownVersions := map[versions.Concrete][]versions.PlatformItem{} + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + c.Log.V(1).Info("found version", "version", ver, "platform", details) + knownVersions[*ver] = append(knownVersions[*ver], versions.PlatformItem{ + Platform: details, + Hash: &versions.Hash{ + Type: versions.SHA512HashType, + Encoding: versions.HexHashEncoding, + Value: archive.Hash, + }, + }) + } + } + + res := make([]versions.Set, 0, len(knownVersions)) + for ver, details := range knownVersions { + res = append(res, versions.Set{Version: ver, Platforms: details}) + } + // sort in inverse order so that the newest one is first + sort.Slice(res, func(i, j int) bool { + first, second := res[i].Version, res[j].Version + return first.NewerThan(second) + }) + + return res, nil +} + +// GetVersion downloads the given concrete version for the given concrete platform, writing it to the out. 
+func (c *HTTPClient) GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error { + index, err := c.getIndex(ctx) + if err != nil { + return err + } + + var loc *url.URL + var name string + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + + if *ver == version && details.OS == platform.OS && details.Arch == platform.Arch { + loc, err = url.Parse(archive.SelfLink) + if err != nil { + return fmt.Errorf("error parsing selfLink %q, %w", loc, err) + } + name = archiveName + break + } + } + } + if name == "" { + return fmt.Errorf("unable to find archive for %s (%s,%s)", version, platform.OS, platform.Arch) + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch %s: %w", name, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch %s (%s): %w", name, req.URL, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch %s (%s) -- got status %q", name, req.URL, resp.Status) + } + + return readBody(resp, out, name, platform) +} + +// FetchSum fetches the checksum for the given concrete version & platform into +// the given platform item. 
+func (c *HTTPClient) FetchSum(ctx context.Context, version versions.Concrete, platform *versions.PlatformItem) error { + index, err := c.getIndex(ctx) + if err != nil { + return err + } + + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + + if *ver == version && details.OS == platform.OS && details.Arch == platform.Arch { + platform.Hash = &versions.Hash{ + Type: versions.SHA512HashType, + Encoding: versions.HexHashEncoding, + Value: archive.Hash, + } + return nil + } + } + } + + return fmt.Errorf("unable to find archive for %s (%s,%s)", version, platform.OS, platform.Arch) +} + +func (c *HTTPClient) getIndex(ctx context.Context) (*Index, error) { + indexURL := c.IndexURL + if indexURL == "" { + indexURL = DefaultIndexURL + } + + loc, err := url.Parse(indexURL) + if err != nil { + return nil, fmt.Errorf("unable to parse index URL: %w", err) + } + + c.Log.V(1).Info("listing versions", "index", indexURL) + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return nil, fmt.Errorf("unable to construct request to get index: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unable to perform request to get index: %w", err) + } + + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("unable to get index -- got status %q", resp.Status) + } + + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to get index -- unable to read body %w", err) + } + + var index Index + if err := yaml.Unmarshal(responseBody, &index); err != nil { + return nil, fmt.Errorf("unable to unmarshal index: %w", err) + } + return &index, nil +} diff --git 
a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/read_body.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/read_body.go new file mode 100644 index 00000000000..650e41282c9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/read_body.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 The Kubernetes Authors + +package remote + +import ( + //nolint:gosec // We're aware that md5 is a weak cryptographic primitive, but we don't have a choice here. + "crypto/md5" + "crypto/sha512" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "net/http" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +func readBody(resp *http.Response, out io.Writer, archiveName string, platform versions.PlatformItem) error { + if platform.Hash != nil { + // stream in chunks to do the checksum, don't load the whole thing into + // memory to avoid causing issues with big files. + buf := make([]byte, 32*1024) // 32KiB, same as io.Copy + var hasher hash.Hash + switch platform.Hash.Type { + case versions.SHA512HashType: + hasher = sha512.New() + case versions.MD5HashType: + hasher = md5.New() //nolint:gosec // We're aware that md5 is a weak cryptographic primitive, but we don't have a choice here. 
+ default: + return fmt.Errorf("hash type %s not implemented", platform.Hash.Type) + } + for cont := true; cont; { + amt, err := resp.Body.Read(buf) + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("unable read next chunk of %s: %w", archiveName, err) + } + if amt > 0 { + // checksum never returns errors according to docs + hasher.Write(buf[:amt]) + if _, err := out.Write(buf[:amt]); err != nil { + return fmt.Errorf("unable write next chunk of %s: %w", archiveName, err) + } + } + cont = amt > 0 && !errors.Is(err, io.EOF) + } + + var sum string + switch platform.Hash.Encoding { + case versions.Base64HashEncoding: + sum = base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + case versions.HexHashEncoding: + sum = hex.EncodeToString(hasher.Sum(nil)) + default: + return fmt.Errorf("hash encoding %s not implemented", platform.Hash.Encoding) + } + if sum != platform.Hash.Value { + return fmt.Errorf("checksum mismatch for %s: %s (computed) != %s (reported)", archiveName, sum, platform.Hash.Value) + } + } else if _, err := io.Copy(out, resp.Body); err != nil { + return fmt.Errorf("unable to download %s: %w", archiveName, err) + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/store/helpers.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/store/helpers.go new file mode 100644 index 00000000000..30902187e92 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/store/helpers.go @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package store + +import ( + "errors" + "os" + "path/filepath" + "runtime" +) + +// DefaultStoreDir returns the default location for the store. +// It's dependent on operating system: +// +// - Windows: %LocalAppData%\kubebuilder-envtest +// - OSX: ~/Library/Application Support/io.kubebuilder.envtest +// - Others: ${XDG_DATA_HOME:-~/.local/share}/kubebuilder-envtest +// +// Otherwise, it errors out. 
Note that these paths must not be relied upon +// manually. +func DefaultStoreDir() (string, error) { + var baseDir string + + // find the base data directory + switch runtime.GOOS { + case "windows": + baseDir = os.Getenv("LocalAppData") + if baseDir == "" { + return "", errors.New("%LocalAppData% is not defined") + } + case "darwin", "ios": + homeDir := os.Getenv("HOME") + if homeDir == "" { + return "", errors.New("$HOME is not defined") + } + baseDir = filepath.Join(homeDir, "Library/Application Support") + default: + baseDir = os.Getenv("XDG_DATA_HOME") + if baseDir == "" { + homeDir := os.Getenv("HOME") + if homeDir == "" { + return "", errors.New("neither $XDG_DATA_HOME nor $HOME are defined") + } + baseDir = filepath.Join(homeDir, ".local/share") + } + } + + // append our program-specific dir to it (OSX has a slightly different + // convention so try to follow that). + switch runtime.GOOS { + case "darwin", "ios": + return filepath.Join(baseDir, "io.kubebuilder.envtest"), nil + default: + return filepath.Join(baseDir, "kubebuilder-envtest"), nil + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/store/store.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/store/store.go new file mode 100644 index 00000000000..2ee0b64dec5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/store/store.go @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package store + +import ( + "archive/tar" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + + "github.com/go-logr/logr" + "github.com/spf13/afero" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// TODO(directxman12): error messages don't show full path, which is gonna make +// things hard to debug + +// Item is a version-platform pair. 
+type Item struct { + Version versions.Concrete + Platform versions.Platform +} + +// dirName returns the directory name in the store for this item. +func (i Item) dirName() string { + return i.Platform.BaseName(i.Version) +} +func (i Item) String() string { + return fmt.Sprintf("%s (%s)", i.Version, i.Platform) +} + +// Filter is a version spec & platform selector (i.e. platform +// potentially with wildcards) to filter store items. +type Filter struct { + Version versions.Spec + Platform versions.Platform +} + +// Matches checks if this filter matches the given item. +func (f Filter) Matches(item Item) bool { + return f.Version.Matches(item.Version) && f.Platform.Matches(item.Platform) +} + +// Store knows how to list, load, store, and delete envtest tools. +type Store struct { + // Root is the root FS that the store stores in. You'll probably + // want to use a BasePathFS to scope it down to a particular directory. + // + // Note that if for some reason there are nested BasePathFSes, and they're + // interrupted by a non-BasePathFS, Path won't work properly. + Root afero.Fs +} + +// NewAt creates a new store on disk at the given path. +func NewAt(path string) *Store { + return &Store{ + Root: afero.NewBasePathFs(afero.NewOsFs(), path), + } +} + +// Initialize ensures that the store is all set up on disk, etc. +func (s *Store) Initialize(ctx context.Context) error { + log, err := logr.FromContext(ctx) + if err != nil { + return err + } + + log.V(1).Info("ensuring base binaries dir exists") + if err := s.unpackedBase().MkdirAll("", 0755); err != nil { + return fmt.Errorf("unable to make sure base binaries dir exists: %w", err) + } + return nil +} + +// Has checks if an item exists in the store. 
+func (s *Store) Has(item Item) (bool, error) { + path := s.unpackedPath(item.dirName()) + _, err := path.Stat("") + if err != nil && !errors.Is(err, afero.ErrFileNotFound) { + return false, fmt.Errorf("unable to check if version-platform dir exists: %w", err) + } + return err == nil, nil +} + +// List lists all items matching the given filter. +// +// Results are stored by version (newest first), and OS/arch (consistently, +// but no guaranteed ordering). +func (s *Store) List(ctx context.Context, matching Filter) ([]Item, error) { + var res []Item + if err := s.eachItem(ctx, matching, func(_ string, item Item) { + res = append(res, item) + }); err != nil { + return nil, fmt.Errorf("unable to list version-platform pairs in store: %w", err) + } + + sort.Slice(res, func(i, j int) bool { + if !res[i].Version.Matches(res[j].Version) { + return res[i].Version.NewerThan(res[j].Version) + } + return orderPlatforms(res[i].Platform, res[j].Platform) + }) + + return res, nil +} + +// Add adds this item to the store, with the given contents (a .tar.gz file). 
+func (s *Store) Add(ctx context.Context, item Item, contents io.Reader) (resErr error) { + log, err := logr.FromContext(ctx) + if err != nil { + return err + } + + itemName := item.dirName() + log = log.WithValues("version-platform", itemName) + itemPath := s.unpackedPath(itemName) + + // make sure to clean up if we hit an error + defer func() { + if resErr != nil { + // intentially ignore this because we can't really do anything + err := s.removeItem(itemPath) + if err != nil { + log.Error(err, "unable to clean up partially added version-platform pair after error") + } + } + }() + + log.V(1).Info("ensuring version-platform binaries dir exists and is empty & writable") + _, err = itemPath.Stat("") + if err != nil && !errors.Is(err, afero.ErrFileNotFound) { + return fmt.Errorf("unable to ensure version-platform binaries dir %s exists", itemName) + } + if err == nil { // exists + log.V(1).Info("cleaning up old version-platform binaries dir") + if err := s.removeItem(itemPath); err != nil { + return fmt.Errorf("unable to clean up existing version-platform binaries dir %s", itemName) + } + } + if err := itemPath.MkdirAll("", 0755); err != nil { + return fmt.Errorf("unable to make sure entry dir %s exists", itemName) + } + + log.V(1).Info("extracting archive") + gzStream, err := gzip.NewReader(contents) + if err != nil { + return fmt.Errorf("unable to start un-gz-ing entry archive") + } + tarReader := tar.NewReader(gzStream) + + var header *tar.Header + for header, err = tarReader.Next(); err == nil; header, err = tarReader.Next() { + if header.Typeflag != tar.TypeReg { // TODO(directxman12): support symlinks, etc? + log.V(1).Info("skipping non-regular-file entry in archive", "entry", header.Name) + continue + } + // just dump all files to the main path, ignoring the prefixed directory + // paths -- they're redundant. We also ignore bits for the most part (except for X), + // preferfing our own scheme. 
+ targetPath := filepath.Base(header.Name) + log.V(1).Info("writing archive file to disk", "archive file", header.Name, "on-disk file", targetPath) + perms := 0555 & header.Mode // make sure we're at most r+x + binOut, err := itemPath.OpenFile(targetPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.FileMode(perms)) + if err != nil { + return fmt.Errorf("unable to create file %s from archive to disk for version-platform pair %s", targetPath, itemName) + } + if err := func() error { // IIFE to get the defer properly in a loop + defer binOut.Close() + if _, err := io.Copy(binOut, tarReader); err != nil { //nolint:gosec + return fmt.Errorf("unable to write file %s from archive to disk for version-platform pair %s", targetPath, itemName) + } + return nil + }(); err != nil { + return err + } + } + if err != nil && !errors.Is(err, io.EOF) { //nolint:govet + return fmt.Errorf("unable to finish un-tar-ing the downloaded archive: %w", err) + } + log.V(1).Info("unpacked archive") + + log.V(1).Info("switching version-platform directory to read-only") + if err := itemPath.Chmod("", 0555); err != nil { + // don't bail, this isn't fatal + log.Error(err, "unable to make version-platform directory read-only") + } + return nil +} + +// Remove removes all items matching the given filter. +// +// It returns a list of the successfully removed items (even in the case +// of an error). 
+func (s *Store) Remove(ctx context.Context, matching Filter) ([]Item, error) { + log, err := logr.FromContext(ctx) + if err != nil { + return nil, err + } + + var removed []Item + var savedErr error + if err := s.eachItem(ctx, matching, func(name string, item Item) { + log.V(1).Info("Removing version-platform pair at path", "version-platform", item, "path", name) + + if err := s.removeItem(s.unpackedPath(name)); err != nil { + log.Error(err, "unable to make existing version-platform dir writable to clean it up", "path", name) + savedErr = fmt.Errorf("unable to remove version-platform pair %s (dir %s): %w", item, name, err) + return // don't mark this as removed in the report + } + removed = append(removed, item) + }); err != nil { + return removed, fmt.Errorf("unable to list version-platform pairs to figure out what to delete: %w", err) + } + if savedErr != nil { + return removed, savedErr + } + return removed, nil +} + +// Path returns an actual path that case be used to access this item. +func (s *Store) Path(item Item) (string, error) { + path := s.unpackedPath(item.dirName()) + // NB(directxman12): we need root's realpath because RealPath only + // looks at its own path, and so thus doesn't prepend the underlying + // root's base path. + // + // Technically, if we're fed something that's double wrapped as root, + // this'll be wrong, but this is basically as much as we can do + return afero.FullBaseFsPath(path.(*afero.BasePathFs), ""), nil +} + +// unpackedBase returns the directory in which item dirs lives. +func (s *Store) unpackedBase() afero.Fs { + return afero.NewBasePathFs(s.Root, "k8s") +} + +// unpackedPath returns the item dir with this name. +func (s *Store) unpackedPath(name string) afero.Fs { + return afero.NewBasePathFs(s.unpackedBase(), name) +} + +// eachItem iterates through the on-disk versions that match our version & platform selector, +// calling the callback for each. 
+func (s *Store) eachItem(ctx context.Context, filter Filter, cb func(name string, item Item)) error { + log, err := logr.FromContext(ctx) + if err != nil { + return err + } + + entries, err := afero.ReadDir(s.unpackedBase(), "") + if err != nil { + return fmt.Errorf("unable to list folders in store's unpacked directory: %w", err) + } + + for _, entry := range entries { + if !entry.IsDir() { + log.V(1).Info("skipping dir entry, not a folder", "entry", entry.Name()) + continue + } + ver, pl := versions.ExtractWithPlatform(versions.VersionPlatformRE, entry.Name()) + if ver == nil { + log.V(1).Info("skipping dir entry, not a version", "entry", entry.Name()) + continue + } + item := Item{Version: *ver, Platform: pl} + + if !filter.Matches(item) { + log.V(1).Info("skipping on disk version, does not match version and platform selectors", "platform", pl, "version", ver, "entry", entry.Name()) + continue + } + + cb(entry.Name(), item) + } + + return nil +} + +// removeItem removes the given item directory from disk. +func (s *Store) removeItem(itemDir afero.Fs) error { + if err := itemDir.Chmod("", 0755); err != nil { + // no point in trying to remove if we can't fix the permissions, bail here + return fmt.Errorf("unable to make version-platform dir writable: %w", err) + } + if err := itemDir.RemoveAll(""); err != nil && !errors.Is(err, afero.ErrFileNotFound) { + return fmt.Errorf("unable to remove version-platform dir: %w", err) + } + return nil +} + +// orderPlatforms orders platforms by OS then arch. 
+func orderPlatforms(first, second versions.Platform) bool { + // sort by OS, then arch + if first.OS != second.OS { + return first.OS < second.OS + } + return first.Arch < second.Arch +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/parse.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/parse.go new file mode 100644 index 00000000000..cd25710b2b1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/parse.go @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package versions + +import ( + "fmt" + "regexp" + "strconv" +) + +var ( + // baseVersionRE is a semver-ish version -- either X.Y.Z, X.Y, or X.Y.{*|x}. + baseVersionRE = `(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:\.(?P0|[1-9]\d*|x|\*))?` + // versionExprRe matches valid version input for FromExpr. + versionExprRE = regexp.MustCompile(`^(?P<|~|<=)?` + baseVersionRE + `(?P!)?$`) + + // ConcreteVersionRE matches a concrete version anywhere in the string. + ConcreteVersionRE = regexp.MustCompile(`(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)`) +) + +// FromExpr extracts a version from a string in the form of a semver version, +// where X, Y, and Z may also be wildcards ('*', 'x'), +// and pre-release names & numbers may also be wildcards. The prerelease section is slightly +// restricted to match what k8s does. +// The whole string is a version selector as follows: +// - X.Y.Z matches version X.Y.Z where x, y, and z are +// are ints >= 0, and Z may be '*' or 'x' +// - X.Y is equivalent to X.Y.* +// - ~X.Y.Z means >= X.Y.Z && < X.Y+1.0 +// - = comparisons, if we use + // wildcards with a selector we can just set them to zero. 
+ if verInfo.Patch == AnyPoint { + verInfo.Patch = PointVersion(0) + } + baseVer := *verInfo.AsConcrete() + spec.Selector = TildeSelector{Concrete: baseVer} + default: + panic("unreachable: mismatch between FromExpr and its RE in selector") + } + + return spec, nil +} + +// PointVersionFromValidString extracts a point version +// from the corresponding string representation, which may +// be a number >= 0, or x|* (AnyPoint). +// +// Anything else will cause a panic (use this on strings +// extracted from regexes). +func PointVersionFromValidString(str string) PointVersion { + switch str { + case "*", "x": + return AnyPoint + default: + ver, err := strconv.Atoi(str) + if err != nil { + panic(err) + } + return PointVersion(ver) + } +} + +// PatchSelectorFromMatch constructs a simple selector according to the +// ParseExpr rules out of pre-validated sections. +// +// re must include name captures for major, minor, patch, prenum, and prelabel +// +// Any bad input may cause a panic. Use with when you got the parts from an RE match. 
+func PatchSelectorFromMatch(match []string, re *regexp.Regexp) PatchSelector { + // already parsed via RE, should be fine to ignore errors unless it's a + // *huge* number + major, err := strconv.Atoi(match[re.SubexpIndex("major")]) + if err != nil { + panic("invalid input passed as patch selector (invalid state)") + } + minor, err := strconv.Atoi(match[re.SubexpIndex("minor")]) + if err != nil { + panic("invalid input passed as patch selector (invalid state)") + } + + // patch is optional, means wildcard if left off + patch := AnyPoint + if patchRaw := match[re.SubexpIndex("patch")]; patchRaw != "" { + patch = PointVersionFromValidString(patchRaw) + } + return PatchSelector{ + Major: major, + Minor: minor, + Patch: patch, + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/platform.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/platform.go new file mode 100644 index 00000000000..1cfbd05c065 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/platform.go @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package versions + +import ( + "fmt" + "regexp" +) + +// Platform contains OS & architecture information +// Either may be '*' to indicate a wildcard match. +type Platform struct { + OS string + Arch string +} + +// Matches indicates if this platform matches the other platform, +// potentially with wildcard values. +func (p Platform) Matches(other Platform) bool { + return (p.OS == other.OS || p.OS == "*" || other.OS == "*") && + (p.Arch == other.Arch || p.Arch == "*" || other.Arch == "*") +} + +// IsWildcard checks if either OS or Arch are set to wildcard values. 
+func (p Platform) IsWildcard() bool { + return p.OS == "*" || p.Arch == "*" +} +func (p Platform) String() string { + return fmt.Sprintf("%s/%s", p.OS, p.Arch) +} + +// BaseName returns the base directory name that fully identifies a given +// version and platform. +func (p Platform) BaseName(ver Concrete) string { + return fmt.Sprintf("%d.%d.%d-%s-%s", ver.Major, ver.Minor, ver.Patch, p.OS, p.Arch) +} + +// ArchiveName returns the full archive name for this version and platform. +func (p Platform) ArchiveName(ver Concrete) string { + return "envtest-v" + p.BaseName(ver) + ".tar.gz" +} + +// PlatformItem represents a platform with corresponding +// known metadata for its download. +type PlatformItem struct { + Platform + + *Hash +} + +// Hash of an archive with envtest binaries. +type Hash struct { + // Type of the hash. + // controller-tools uses SHA512HashType. + Type HashType + + // Encoding of the hash value. + // controller-tools uses HexHashEncoding. + Encoding HashEncoding + + // Value of the hash. + Value string +} + +// HashType is the type of a hash. +type HashType string + +const ( + // SHA512HashType represents a sha512 hash + SHA512HashType HashType = "sha512" + + // MD5HashType represents a md5 hash + MD5HashType HashType = "md5" +) + +// HashEncoding is the encoding of a hash +type HashEncoding string + +const ( + // Base64HashEncoding represents base64 encoding + Base64HashEncoding HashEncoding = "base64" + + // HexHashEncoding represents hex encoding + HexHashEncoding HashEncoding = "hex" +) + +// Set is a concrete version and all the corresponding platforms that it's available for. +type Set struct { + Version Concrete + Platforms []PlatformItem +} + +// ExtractWithPlatform produces a version & platform from the given regular expression +// and string that should match it. If no match is found, Version will be nil. 
+// +// The regular expression must have the following capture groups: +// major, minor, patch, prelabel, prenum, os, arch, and must not support wildcard +// versions. +func ExtractWithPlatform(re *regexp.Regexp, name string) (*Concrete, Platform) { + match := re.FindStringSubmatch(name) + if match == nil { + return nil, Platform{} + } + verInfo := PatchSelectorFromMatch(match, re) + if verInfo.AsConcrete() == nil { + panic(fmt.Sprintf("%v", verInfo)) + } + // safe to convert, we've ruled out wildcards in our RE + return verInfo.AsConcrete(), Platform{ + OS: match[re.SubexpIndex("os")], + Arch: match[re.SubexpIndex("arch")], + } +} + +var ( + versionPlatformREBase = ConcreteVersionRE.String() + `-(?P\w+)-(?P\w+)` + // VersionPlatformRE matches concrete version-platform strings. + VersionPlatformRE = regexp.MustCompile(`^` + versionPlatformREBase + `$`) + // ArchiveRE matches concrete version-platform.tar.gz strings. + // The archives published to GitHub releases by controller-tools use the "envtest-v" prefix (e.g. "envtest-v1.30.0-darwin-amd64.tar.gz"). + ArchiveRE = regexp.MustCompile(`^envtest-v` + versionPlatformREBase + `\.tar\.gz$`) +) diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/version.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/version.go new file mode 100644 index 00000000000..945a95006ff --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/version.go @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package versions + +import ( + "fmt" + "strconv" +) + +// NB(directxman12): much of this is custom instead of using a library because +// a) none of the standard libraries have hashable version types (for valid reasons, +// but we can use a restricted subset for our usecase) +// b) everybody has their own definition of how selectors work anyway + +// NB(directxman12): pre-release support is... 
complicated with selectors +// if we end up needing it, think carefully about what a wildcard prerelease +// type means (does it include "not a prerelease"?), and what <=1.17.3-x.x means. + +// Concrete is a concrete Kubernetes-style semver version. +type Concrete struct { + Major, Minor, Patch int +} + +// AsConcrete returns this version. +func (c Concrete) AsConcrete() *Concrete { + return &c +} + +// NewerThan checks if the given other version is newer than this one. +func (c Concrete) NewerThan(other Concrete) bool { + if c.Major != other.Major { + return c.Major > other.Major + } + if c.Minor != other.Minor { + return c.Minor > other.Minor + } + return c.Patch > other.Patch +} + +// Matches checks if this version is equal to the other one. +func (c Concrete) Matches(other Concrete) bool { + return c == other +} + +func (c Concrete) String() string { + return fmt.Sprintf("%d.%d.%d", c.Major, c.Minor, c.Patch) +} + +// PatchSelector selects a set of versions where the patch is a wildcard. +type PatchSelector struct { + Major, Minor int + Patch PointVersion +} + +func (s PatchSelector) String() string { + return fmt.Sprintf("%d.%d.%s", s.Major, s.Minor, s.Patch) +} + +// Matches checks if the given version matches this selector. +func (s PatchSelector) Matches(ver Concrete) bool { + return s.Major == ver.Major && s.Minor == ver.Minor && s.Patch.Matches(ver.Patch) +} + +// AsConcrete returns nil if there are wildcards in this selector, +// and the concrete version that this selects otherwise. +func (s PatchSelector) AsConcrete() *Concrete { + if s.Patch == AnyPoint { + return nil + } + + return &Concrete{ + Major: s.Major, + Minor: s.Minor, + Patch: int(s.Patch), // safe to cast, we've just checked wildcards above + } +} + +// TildeSelector selects [X.Y.Z, X.Y+1.0). +type TildeSelector struct { + Concrete +} + +// Matches checks if the given version matches this selector. 
+func (s TildeSelector) Matches(ver Concrete) bool { + if s.Concrete.Matches(ver) { + // easy, "exact" match + return true + } + return ver.Major == s.Major && ver.Minor == s.Minor && ver.Patch >= s.Patch +} +func (s TildeSelector) String() string { + return "~" + s.Concrete.String() +} + +// AsConcrete returns nil (this is never a concrete version). +func (s TildeSelector) AsConcrete() *Concrete { + return nil +} + +// LessThanSelector selects versions older than the given one +// (mainly useful for cleaning up). +type LessThanSelector struct { + PatchSelector + OrEquals bool +} + +// Matches checks if the given version matches this selector. +func (s LessThanSelector) Matches(ver Concrete) bool { + if s.Major != ver.Major { + return s.Major > ver.Major + } + if s.Minor != ver.Minor { + return s.Minor > ver.Minor + } + if !s.Patch.Matches(ver.Patch) { + // matches rules out a wildcard, so it's fine to compare as normal numbers + return int(s.Patch) > ver.Patch + } + return s.OrEquals +} +func (s LessThanSelector) String() string { + if s.OrEquals { + return "<=" + s.PatchSelector.String() + } + return "<" + s.PatchSelector.String() +} + +// AsConcrete returns nil (this is never a concrete version). +func (s LessThanSelector) AsConcrete() *Concrete { + return nil +} + +// AnySelector matches any version at all. +type AnySelector struct{} + +// Matches checks if the given version matches this selector. +func (AnySelector) Matches(_ Concrete) bool { return true } + +// AsConcrete returns nil (this is never a concrete version). +func (AnySelector) AsConcrete() *Concrete { return nil } +func (AnySelector) String() string { return "*" } + +// Selector selects some concrete version or range of versions. +type Selector interface { + // AsConcrete tries to return this selector as a concrete version. + // If the selector would only match a single version, it'll return + // that, otherwise it'll return nil. 
+ AsConcrete() *Concrete + // Matches checks if this selector matches the given concrete version. + Matches(ver Concrete) bool + String() string +} + +// Spec matches some version or range of versions, and tells us how to deal with local and +// remote when selecting a version. +type Spec struct { + Selector + + // CheckLatest tells us to check the remote server for the latest + // version that matches our selector, instead of just relying on + // matching local versions. + CheckLatest bool +} + +// MakeConcrete replaces the contents of this spec with one that +// matches the given concrete version (without checking latest +// from the server). +func (s *Spec) MakeConcrete(ver Concrete) { + s.Selector = ver + s.CheckLatest = false +} + +// AsConcrete returns the underlying selector as a concrete version, if +// possible. +func (s Spec) AsConcrete() *Concrete { + return s.Selector.AsConcrete() +} + +// Matches checks if the underlying selector matches the given version. +func (s Spec) Matches(ver Concrete) bool { + return s.Selector.Matches(ver) +} + +func (s Spec) String() string { + res := s.Selector.String() + if s.CheckLatest { + res += "!" + } + return res +} + +// PointVersion represents a wildcard (patch) version +// or concrete number. +type PointVersion int + +const ( + // AnyPoint matches any point version. + AnyPoint PointVersion = -1 +) + +// Matches checks if a point version is compatible +// with a concrete point version. +// Two point versions are compatible if they are +// a) both concrete +// b) one is a wildcard. +func (p PointVersion) Matches(other int) bool { + switch p { + case AnyPoint: + return true + default: + return int(p) == other + } +} +func (p PointVersion) String() string { + switch p { + case AnyPoint: + return "*" + default: + return strconv.Itoa(int(p)) + } +} + +var ( + // LatestVersion matches the most recent version on the remote server. 
+ LatestVersion = Spec{ + Selector: AnySelector{}, + CheckLatest: true, + } + // AnyVersion matches any local or remote version. + AnyVersion = Spec{ + Selector: AnySelector{}, + } +) diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows/workflows.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows/workflows.go new file mode 100644 index 00000000000..fdabd995ae7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows/workflows.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package workflows + +import ( + "context" + "io" + + "github.com/go-logr/logr" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" +) + +// Use is a workflow that prints out information about stored +// version-platform pairs, downloading them if necessary & requested. +type Use struct { + UseEnv bool + AssetsPath string + PrintFormat envp.PrintFormat +} + +// Do executes this workflow. +func (f Use) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("use")) + env.EnsureBaseDirs(ctx) + if f.UseEnv { + // the env var unconditionally + if env.PathMatches(f.AssetsPath) { + env.PrintInfo(f.PrintFormat) + return + } + } + env.EnsureVersionIsSet(ctx) + if env.ExistsAndValid() { + env.PrintInfo(f.PrintFormat) + return + } + if env.NoDownload { + envp.Exit(2, "no such version (%s) exists on disk for this architecture (%s) -- try running `list -i` to see what's on disk", env.Version, env.Platform) + } + env.Fetch(ctx) + env.PrintInfo(f.PrintFormat) +} + +// List is a workflow that lists version-platform pairs in the store +// and on the remote server that match the given filter. +type List struct{} + +// Do executes this workflow. 
+func (List) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("list")) + env.EnsureBaseDirs(ctx) + env.ListVersions(ctx) +} + +// Cleanup is a workflow that removes version-platform pairs from the store +// that match the given filter. +type Cleanup struct{} + +// Do executes this workflow. +func (Cleanup) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("cleanup")) + + env.NoDownload = true + env.ForceDownload = false + + env.EnsureBaseDirs(ctx) + env.Remove(ctx) +} + +// Sideload is a workflow that adds or replaces a version-platform pair in the +// store, using the given archive as the files. +type Sideload struct { + Input io.Reader + PrintFormat envp.PrintFormat +} + +// Do executes this workflow. +func (f Sideload) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("sideload")) + + env.EnsureBaseDirs(ctx) + env.NoDownload = true + env.Sideload(ctx, f.Input) + env.PrintInfo(f.PrintFormat) +}