From e0be86803f94e7d4c93c0eaff67a10b32c59519b Mon Sep 17 00:00:00 2001
From: Nahshon Unna-Tsameret
Date: Mon, 5 Dec 2022 08:18:30 +0200
Subject: [PATCH 1/4] Cache KubeVirt Boot Image
Create a new PVC (if missing) to pull the boot image, then modify the
KubeVirt node pool template to clone from the new PVC, instead of
pulling the image from each node.
Signed-off-by: Nahshon Unna-Tsameret
---
api/fixtures/example_kubevirt.go | 9 +
api/scheme.go | 2 +
api/v1alpha1/nodepool_types.go | 24 ++
api/v1alpha1/zz_generated.deepcopy.go | 20 ++
api/v1beta1/nodepool_types.go | 24 ++
api/v1beta1/zz_generated.deepcopy.go | 20 ++
cmd/cluster/core/create.go | 1 +
cmd/cluster/kubevirt/create.go | 10 +
.../hypershift.openshift.io_nodepools.yaml | 28 ++
cmd/nodepool/kubevirt/create.go | 11 +
docs/content/reference/api.md | 69 +++++
hack/app-sre/saas_template.yaml | 28 ++
.../nodepool/kubevirt/imagecacher.go | 249 ++++++++++++++++++
.../controllers/nodepool/kubevirt/kubevirt.go | 148 +++++------
.../nodepool/kubevirt/kubevirt_test.go | 210 +++++++++++++--
.../nodepool/nodepool_controller.go | 107 ++++----
.../nodepool/nodepool_controller_test.go | 5 +-
test/e2e/util/util.go | 2 +
18 files changed, 813 insertions(+), 154 deletions(-)
create mode 100644 hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
diff --git a/api/fixtures/example_kubevirt.go b/api/fixtures/example_kubevirt.go
index 413827edc97..f80840694d6 100644
--- a/api/fixtures/example_kubevirt.go
+++ b/api/fixtures/example_kubevirt.go
@@ -20,6 +20,8 @@ type ExampleKubevirtOptions struct {
BaseDomainPassthrough bool
InfraKubeConfig []byte
InfraNamespace string
+ CacheBootImage bool
+ CacheStrategyType string
}
func ExampleKubeVirtTemplate(o *ExampleKubevirtOptions) *hyperv1.KubevirtNodePoolPlatform {
@@ -69,5 +71,12 @@ func ExampleKubeVirtTemplate(o *ExampleKubevirtOptions) *hyperv1.KubevirtNodePoo
}
}
+ strategyType := hyperv1.KubevirtCachingStrategyType(o.CacheStrategyType)
+ if strategyType == hyperv1.KubevirtCachingStrategyNone || strategyType == hyperv1.KubevirtCachingStrategyPVC {
+ exampleTemplate.RootVolume.CacheStrategy = &hyperv1.KubevirtCachingStrategy{
+ Type: strategyType,
+ }
+ }
+
return exampleTemplate
}
diff --git a/api/scheme.go b/api/scheme.go
index c7daac1b358..fdc0f45b754 100644
--- a/api/scheme.go
+++ b/api/scheme.go
@@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/json"
kasv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
capiaws "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
capiazure "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
capiibm "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta1"
@@ -71,4 +72,5 @@ func init() {
capiazure.AddToScheme(Scheme)
snapshotv1.AddToScheme(Scheme)
imagev1.AddToScheme(Scheme)
+ cdiv1beta1.AddToScheme(Scheme)
}
diff --git a/api/v1alpha1/nodepool_types.go b/api/v1alpha1/nodepool_types.go
index 821cbf5f4ce..ef58ca4c9d4 100644
--- a/api/v1alpha1/nodepool_types.go
+++ b/api/v1alpha1/nodepool_types.go
@@ -552,6 +552,26 @@ type KubevirtPersistentVolume struct {
AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"`
}
+// KubevirtCachingStrategyType is the type of the boot image caching mechanism for the KubeVirt provider
+type KubevirtCachingStrategyType string
+
+const (
+ // KubevirtCachingStrategyNone means that hypershift will not cache the boot image
+ KubevirtCachingStrategyNone KubevirtCachingStrategyType = "None"
+
+ // KubevirtCachingStrategyPVC means that hypershift will cache the boot image into a PVC; only relevant when using
+ // a QCOW boot image, and is ignored when using a container image
+ KubevirtCachingStrategyPVC KubevirtCachingStrategyType = "PVC"
+)
+
+// KubevirtCachingStrategy defines the boot image caching strategy
+type KubevirtCachingStrategy struct {
+ // Type is the type of the caching strategy
+ // +kubebuilder:default=None
+ // +kubebuilder:validation:Enum=None;PVC
+ Type KubevirtCachingStrategyType `json:"type"`
+}
+
// KubevirtRootVolume represents the volume that the rhcos disk will be stored and run from.
type KubevirtRootVolume struct {
// Image represents what rhcos image to use for the node pool
@@ -561,6 +581,10 @@ type KubevirtRootVolume struct {
// KubevirtVolume represents of type of storage to run the image on
KubevirtVolume `json:",inline"`
+
+ // CacheStrategy defines the boot image caching strategy. Default - no caching
+ // +optional
+ CacheStrategy *KubevirtCachingStrategy `json:"CacheStrategy,omitempty"`
}
// KubevirtVolumeType is a specific supported KubeVirt volumes
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index fb8924484a2..d9ff2d77f40 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -1377,6 +1377,21 @@ func (in *KubeconfigSecretRef) DeepCopy() *KubeconfigSecretRef {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtCachingStrategy) DeepCopyInto(out *KubevirtCachingStrategy) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtCachingStrategy.
+func (in *KubevirtCachingStrategy) DeepCopy() *KubevirtCachingStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(KubevirtCachingStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubevirtCompute) DeepCopyInto(out *KubevirtCompute) {
*out = *in
@@ -1531,6 +1546,11 @@ func (in *KubevirtRootVolume) DeepCopyInto(out *KubevirtRootVolume) {
(*in).DeepCopyInto(*out)
}
in.KubevirtVolume.DeepCopyInto(&out.KubevirtVolume)
+ if in.CacheStrategy != nil {
+ in, out := &in.CacheStrategy, &out.CacheStrategy
+ *out = new(KubevirtCachingStrategy)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtRootVolume.
diff --git a/api/v1beta1/nodepool_types.go b/api/v1beta1/nodepool_types.go
index 1382da37991..0a849aaa326 100644
--- a/api/v1beta1/nodepool_types.go
+++ b/api/v1beta1/nodepool_types.go
@@ -546,6 +546,26 @@ type KubevirtPersistentVolume struct {
AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"`
}
+// KubevirtCachingStrategyType is the type of the boot image caching mechanism for the KubeVirt provider
+type KubevirtCachingStrategyType string
+
+const (
+ // KubevirtCachingStrategyNone means that hypershift will not cache the boot image
+ KubevirtCachingStrategyNone KubevirtCachingStrategyType = "None"
+
+ // KubevirtCachingStrategyPVC means that hypershift will cache the boot image into a PVC; only relevant when using
+ // a QCOW boot image, and is ignored when using a container image
+ KubevirtCachingStrategyPVC KubevirtCachingStrategyType = "PVC"
+)
+
+// KubevirtCachingStrategy defines the boot image caching strategy
+type KubevirtCachingStrategy struct {
+ // Type is the type of the caching strategy
+ // +kubebuilder:default=None
+ // +kubebuilder:validation:Enum=None;PVC
+ Type KubevirtCachingStrategyType `json:"type"`
+}
+
// KubevirtRootVolume represents the volume that the rhcos disk will be stored and run from.
type KubevirtRootVolume struct {
// Image represents what rhcos image to use for the node pool
@@ -555,6 +575,10 @@ type KubevirtRootVolume struct {
// KubevirtVolume represents of type of storage to run the image on
KubevirtVolume `json:",inline"`
+
+ // CacheStrategy defines the boot image caching strategy. Default - no caching
+ // +optional
+ CacheStrategy *KubevirtCachingStrategy `json:"CacheStrategy,omitempty"`
}
// KubevirtVolumeType is a specific supported KubeVirt volumes
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index 136ed042b25..07027ca6bdf 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -1336,6 +1336,21 @@ func (in *KubeconfigSecretRef) DeepCopy() *KubeconfigSecretRef {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtCachingStrategy) DeepCopyInto(out *KubevirtCachingStrategy) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtCachingStrategy.
+func (in *KubevirtCachingStrategy) DeepCopy() *KubevirtCachingStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(KubevirtCachingStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubevirtCompute) DeepCopyInto(out *KubevirtCompute) {
*out = *in
@@ -1490,6 +1505,11 @@ func (in *KubevirtRootVolume) DeepCopyInto(out *KubevirtRootVolume) {
(*in).DeepCopyInto(*out)
}
in.KubevirtVolume.DeepCopyInto(&out.KubevirtVolume)
+ if in.CacheStrategy != nil {
+ in, out := &in.CacheStrategy, &out.CacheStrategy
+ *out = new(KubevirtCachingStrategy)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtRootVolume.
diff --git a/cmd/cluster/core/create.go b/cmd/cluster/core/create.go
index 6593ef2f9ee..bc754cf789f 100644
--- a/cmd/cluster/core/create.go
+++ b/cmd/cluster/core/create.go
@@ -121,6 +121,7 @@ type KubevirtPlatformCreateOptions struct {
RootVolumeAccessModes string
InfraKubeConfigFile string
InfraNamespace string
+ CacheStrategyType string
}
type AWSPlatformOptions struct {
diff --git a/cmd/cluster/kubevirt/create.go b/cmd/cluster/kubevirt/create.go
index 6ddfb2046f4..092fbaf5f8e 100644
--- a/cmd/cluster/kubevirt/create.go
+++ b/cmd/cluster/kubevirt/create.go
@@ -9,6 +9,7 @@ import (
"github.com/spf13/cobra"
apifixtures "github.com/openshift/hypershift/api/fixtures"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/cluster/core"
"github.com/openshift/hypershift/support/infraid"
)
@@ -33,6 +34,7 @@ func NewCreateCommand(opts *core.CreateOptions) *cobra.Command {
ContainerDiskImage: "",
RootVolumeSize: 32,
InfraKubeConfigFile: "",
+ CacheStrategyType: "",
}
cmd.Flags().StringVar(&opts.KubevirtPlatform.APIServerAddress, "api-server-address", opts.KubevirtPlatform.APIServerAddress, "The API server address that should be used for components outside the control plane")
@@ -45,6 +47,7 @@ func NewCreateCommand(opts *core.CreateOptions) *cobra.Command {
cmd.Flags().StringVar(&opts.KubevirtPlatform.ServicePublishingStrategy, "service-publishing-strategy", opts.KubevirtPlatform.ServicePublishingStrategy, fmt.Sprintf("Define how to expose the cluster services. Supported options: %s (Use LoadBalancer and Route to expose services), %s (Select a random node to expose service access through)", IngressServicePublishingStrategy, NodePortServicePublishingStrategy))
cmd.Flags().StringVar(&opts.KubevirtPlatform.InfraKubeConfigFile, "infra-kubeconfig-file", opts.KubevirtPlatform.InfraKubeConfigFile, "Path to a kubeconfig file of an external infra cluster to be used to create the guest clusters nodes onto")
cmd.Flags().StringVar(&opts.KubevirtPlatform.InfraNamespace, "infra-namespace", opts.KubevirtPlatform.InfraNamespace, "The namespace in the external infra cluster that is used to host the KubeVirt virtual machines. The namespace must exist prior to creating the HostedCluster")
+ cmd.Flags().StringVar(&opts.KubevirtPlatform.CacheStrategyType, "root-volume-cache-strategy", opts.KubevirtPlatform.CacheStrategyType, "Set the boot image caching strategy; Supported values:\n- \"None\": no caching (default).\n- \"PVC\": Cache into a PVC; only for QCOW image; ignored for container images")
cmd.MarkPersistentFlagRequired("pull-secret")
@@ -117,6 +120,12 @@ func applyPlatformSpecificsValues(ctx context.Context, exampleOptions *apifixtur
return fmt.Errorf("external infra cluster kubeconfig was provided but an infra namespace is missing")
}
+ if opts.KubevirtPlatform.CacheStrategyType != "" &&
+ opts.KubevirtPlatform.CacheStrategyType != string(hyperv1.KubevirtCachingStrategyNone) &&
+ opts.KubevirtPlatform.CacheStrategyType != string(hyperv1.KubevirtCachingStrategyPVC) {
+ return fmt.Errorf(`wrong value for the --root-volume-cache-strategy parameter. May be only "None" or "PVC"`)
+ }
+
exampleOptions.Kubevirt = &apifixtures.ExampleKubevirtOptions{
ServicePublishingStrategy: opts.KubevirtPlatform.ServicePublishingStrategy,
APIServerAddress: opts.KubevirtPlatform.APIServerAddress,
@@ -128,6 +137,7 @@ func applyPlatformSpecificsValues(ctx context.Context, exampleOptions *apifixtur
RootVolumeAccessModes: opts.KubevirtPlatform.RootVolumeAccessModes,
InfraKubeConfig: infraKubeConfigContents,
InfraNamespace: opts.KubevirtPlatform.InfraNamespace,
+ CacheStrategyType: opts.KubevirtPlatform.CacheStrategyType,
}
if opts.BaseDomain != "" {
diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
index 51dba57e63c..4cef58f8c84 100644
--- a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
+++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
@@ -539,6 +539,20 @@ spec:
description: RootVolume represents values associated with
the VM volume that will host rhcos
properties:
+ CacheStrategy:
+ description: CacheStrategy defines the boot image caching
+ strategy. Default - no caching
+ properties:
+ type:
+ default: None
+ description: Type is the type of the caching strategy
+ enum:
+ - None
+ - PVC
+ type: string
+ required:
+ - type
+ type: object
diskImage:
description: Image represents what rhcos image to use
for the node pool
@@ -1371,6 +1385,20 @@ spec:
description: RootVolume represents values associated with
the VM volume that will host rhcos
properties:
+ CacheStrategy:
+ description: CacheStrategy defines the boot image caching
+ strategy. Default - no caching
+ properties:
+ type:
+ default: None
+ description: Type is the type of the caching strategy
+ enum:
+ - None
+ - PVC
+ type: string
+ required:
+ - type
+ type: object
diskImage:
description: Image represents what rhcos image to use
for the node pool
diff --git a/cmd/nodepool/kubevirt/create.go b/cmd/nodepool/kubevirt/create.go
index c56e78b565d..52e9aa04d80 100644
--- a/cmd/nodepool/kubevirt/create.go
+++ b/cmd/nodepool/kubevirt/create.go
@@ -2,6 +2,7 @@ package kubevirt
import (
"context"
+ "fmt"
"github.com/openshift/hypershift/api/fixtures"
"github.com/spf13/cobra"
@@ -18,6 +19,7 @@ type KubevirtPlatformCreateOptions struct {
RootVolumeSize uint32
RootVolumeStorageClass string
RootVolumeAccessModes string
+ CacheStrategyType string
}
func NewCreateCommand(coreOpts *core.CreateNodePoolOptions) *cobra.Command {
@@ -25,6 +27,7 @@ func NewCreateCommand(coreOpts *core.CreateNodePoolOptions) *cobra.Command {
Memory: "4Gi",
Cores: 2,
ContainerDiskImage: "",
+ CacheStrategyType: "",
}
cmd := &cobra.Command{
Use: "kubevirt",
@@ -38,6 +41,7 @@ func NewCreateCommand(coreOpts *core.CreateNodePoolOptions) *cobra.Command {
cmd.Flags().Uint32Var(&platformOpts.RootVolumeSize, "root-volume-size", platformOpts.RootVolumeSize, "The size of the root volume for machines in the NodePool in Gi")
cmd.Flags().StringVar(&platformOpts.RootVolumeAccessModes, "root-volume-access-modes", platformOpts.RootVolumeAccessModes, "The access modes of the root volume to use for machines in the NodePool (comma-delimited list)")
cmd.Flags().StringVar(&platformOpts.ContainerDiskImage, "containerdisk", platformOpts.ContainerDiskImage, "A reference to docker image with the embedded disk to be used to create the machines")
+ cmd.Flags().StringVar(&platformOpts.CacheStrategyType, "root-volume-cache-strategy", platformOpts.CacheStrategyType, "Set the boot image caching strategy; Supported values:\n- \"None\": no caching (default).\n- \"PVC\": Cache into a PVC; only for QCOW image; ignored for container images")
cmd.RunE = coreOpts.CreateRunFunc(platformOpts)
@@ -45,6 +49,12 @@ func NewCreateCommand(coreOpts *core.CreateNodePoolOptions) *cobra.Command {
}
func (o *KubevirtPlatformCreateOptions) UpdateNodePool(_ context.Context, nodePool *hyperv1.NodePool, _ *hyperv1.HostedCluster, _ crclient.Client) error {
+ if o.CacheStrategyType != "" &&
+ o.CacheStrategyType != string(hyperv1.KubevirtCachingStrategyNone) &&
+ o.CacheStrategyType != string(hyperv1.KubevirtCachingStrategyPVC) {
+ return fmt.Errorf(`wrong value for the --root-volume-cache-strategy parameter. May be only "None" or "PVC"`)
+ }
+
nodePool.Spec.Platform.Kubevirt = fixtures.ExampleKubeVirtTemplate(&fixtures.ExampleKubevirtOptions{
Memory: o.Memory,
Cores: o.Cores,
@@ -52,6 +62,7 @@ func (o *KubevirtPlatformCreateOptions) UpdateNodePool(_ context.Context, nodePo
RootVolumeSize: o.RootVolumeSize,
RootVolumeStorageClass: o.RootVolumeStorageClass,
RootVolumeAccessModes: o.RootVolumeAccessModes,
+ CacheStrategyType: o.CacheStrategyType,
})
return nil
}
diff --git a/docs/content/reference/api.md b/docs/content/reference/api.md
index 132c15f3c8b..0463145bc3c 100644
--- a/docs/content/reference/api.md
+++ b/docs/content/reference/api.md
@@ -4732,6 +4732,61 @@ AWSKMSSpec
+###KubevirtCachingStrategy { #hypershift.openshift.io/v1beta1.KubevirtCachingStrategy }
+
+(Appears on:
+KubevirtRootVolume)
+
+
+
KubevirtCachingStrategy defines the boot image caching strategy
+
+
+###KubevirtCachingStrategyType { #hypershift.openshift.io/v1beta1.KubevirtCachingStrategyType }
+
+(Appears on:
+KubevirtCachingStrategy)
+
+
+
KubevirtCachingStrategyType is the type of the boot image caching mechanism for the KubeVirt provider
+
+
+
+
+| Value |
+Description |
+
+
+"None" |
+KubevirtCachingStrategyNone means that hypershift will not cache the boot image
+ |
+
"PVC" |
+KubevirtCachingStrategyPVC means that hypershift will cache the boot image into a PVC; only relevant when using
+a QCOW boot image, and is ignored when using a container image
+ |
+
+
###KubevirtCompute { #hypershift.openshift.io/v1beta1.KubevirtCompute }
(Appears on:
@@ -5079,6 +5134,20 @@ KubevirtVolume
KubevirtVolume represents of type of storage to run the image on
+
+
+CacheStrategy
+
+
+KubevirtCachingStrategy
+
+
+ |
+
+(Optional)
+ CacheStrategy defines the boot image caching strategy. Default - no caching
+ |
+
###KubevirtVolume { #hypershift.openshift.io/v1beta1.KubevirtVolume }
diff --git a/hack/app-sre/saas_template.yaml b/hack/app-sre/saas_template.yaml
index 74813b6442c..9ea87dd8367 100644
--- a/hack/app-sre/saas_template.yaml
+++ b/hack/app-sre/saas_template.yaml
@@ -42859,6 +42859,20 @@ objects:
description: RootVolume represents values associated with
the VM volume that will host rhcos
properties:
+ CacheStrategy:
+ description: CacheStrategy defines the boot image caching
+ strategy. Default - no caching
+ properties:
+ type:
+ default: None
+ description: Type is the type of the caching strategy
+ enum:
+ - None
+ - PVC
+ type: string
+ required:
+ - type
+ type: object
diskImage:
description: Image represents what rhcos image to use
for the node pool
@@ -43700,6 +43714,20 @@ objects:
description: RootVolume represents values associated with
the VM volume that will host rhcos
properties:
+ CacheStrategy:
+ description: CacheStrategy defines the boot image caching
+ strategy. Default - no caching
+ properties:
+ type:
+ default: None
+ description: Type is the type of the caching strategy
+ enum:
+ - None
+ - PVC
+ type: string
+ required:
+ - type
+ type: object
diskImage:
description: Image represents what rhcos image to use
for the node pool
diff --git a/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go b/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
new file mode 100644
index 00000000000..850c9e46d72
--- /dev/null
+++ b/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
@@ -0,0 +1,249 @@
+package kubevirt
+
+import (
+ "context"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ apiresource "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/pointer"
+ "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
+)
+
+const (
+ containerImagePrefix = "docker://"
+ bootImageDVAnnotationHash = "hypershift.openshift.io/kubevirt-boot-image-hash"
+ bootImageDVLabelRoleName = "hypershift.openshift.io/kubevirt-boot-image-role"
+ bootImageDVLabelRoleValue = "kv-boot-image-cache"
+ bootImageDVLabelInfraId = "hypershift.openshift.io/infra-id"
+ bootImageNamePrefix = bootImageDVLabelRoleValue + "-"
+
+ // A CDI annotation for DataVolume, to not wait to first customer, but start importing immediately.
+ // originally defined in CDI.
+ cdiImmediateBindingAnnotation = "cdi.kubevirt.io/storage.bind.immediate.requested"
+ // A CDI annotation for [not] deleting the DataVolume after the PVC population is completed.
+ // originally defined in CDI.
+ cdiDeleteAfterCompletionAnnotation = "cdi.kubevirt.io/storage.deleteAfterCompletion"
+)
+
+// BootImage represents the KubeVirt boot image. It is responsible for caching the boot image and for
+// generating its reference to be used by the node templates.
+type BootImage interface {
+ // CacheImage creates a PVC to cache the node image.
+ CacheImage(ctx context.Context, cl client.Client, nodePool *hyperv1.NodePool, infraId string) error
+ getDVSourceForVMTemplate() *v1beta1.DataVolumeSource
+}
+
+// containerImage is the implementation of the BootImage interface for container images
+type containerImage struct {
+ name string
+}
+
+func newContainerBootImage(imageName string) *containerImage {
+ return &containerImage{
+ name: containerImagePrefix + imageName,
+ }
+}
+
+func (containerImage) CacheImage(_ context.Context, _ client.Client, _ *hyperv1.NodePool, _ string) error {
+ return nil // no implementation
+}
+
+func (ci containerImage) getDVSourceForVMTemplate() *v1beta1.DataVolumeSource {
+ pullMethod := v1beta1.RegistryPullNode
+ return &v1beta1.DataVolumeSource{
+ Registry: &v1beta1.DataVolumeSourceRegistry{
+ URL: &ci.name,
+ PullMethod: &pullMethod,
+ },
+ }
+}
+
+// qcowImage is the implementation of the BootImage interface for QCOW images
+type qcowImage struct {
+ name string
+}
+
+func newQCOWBootImage(imageName string) *qcowImage {
+ return &qcowImage{
+ name: imageName,
+ }
+}
+
+func (qcowImage) CacheImage(_ context.Context, _ client.Client, _ *hyperv1.NodePool, _ string) error {
+ return nil // no implementation
+}
+
+func (qi qcowImage) getDVSourceForVMTemplate() *v1beta1.DataVolumeSource {
+ return &v1beta1.DataVolumeSource{
+ HTTP: &v1beta1.DataVolumeSourceHTTP{
+ URL: qi.name,
+ },
+ }
+}
+
+// cachedQCOWImage is the implementation of the BootImage interface for cached QCOW boot images
+type cachedQCOWImage struct {
+ name string
+ hash string
+ namespace string
+ dvName string
+}
+
+func newCachedQCOWBootImage(name, hash, namespace string) *cachedQCOWImage {
+ return &cachedQCOWImage{
+ name: name,
+ hash: hash,
+ namespace: namespace,
+ }
+}
+
+func (qi *cachedQCOWImage) CacheImage(ctx context.Context, cl client.Client, nodePool *hyperv1.NodePool, infraId string) error {
+ logger := ctrl.LoggerFrom(ctx)
+
+ if nodePool.Spec.Platform.Kubevirt == nil {
+ // should never happen; but since CacheImage is exposed, we need to protect it from wrong inputs.
+ return fmt.Errorf("nodePool does not contain KubeVirt configurations")
+ }
+
+ dvList, err := getCacheDVs(ctx, cl, infraId, qi.namespace)
+ if err != nil {
+ return err
+ }
+
+ oldDVs := make([]v1beta1.DataVolume, 0)
+ var dvName string
+ for _, dv := range dvList {
+ if (len(dvName) == 0) && (dv.Annotations[bootImageDVAnnotationHash] == qi.hash) {
+ dvName = dv.Name
+ } else {
+ oldDVs = append(oldDVs, dv)
+ }
+ }
+
+ // if no DV with the required hash was found
+ if len(dvName) == 0 {
+ logger.Info("couldn't find boot image cache DataVolume; creating it...")
+ dv, err := qi.createDVForCache(ctx, cl, nodePool, infraId)
+ if err != nil {
+ return err
+ }
+ dvName = dv.Name
+ }
+
+ if len(dvName) == 0 {
+ return fmt.Errorf("can't get the name of the boot image cache DataVolume")
+ }
+ qi.dvName = dvName
+ qi.cleanOldCaches(ctx, cl, oldDVs)
+
+ return nil
+}
+
+func (qi *cachedQCOWImage) cleanOldCaches(ctx context.Context, cl client.Client, oldDVs []v1beta1.DataVolume) {
+ logger := ctrl.LoggerFrom(ctx)
+ for _, oldDV := range oldDVs {
+ if oldDV.DeletionTimestamp == nil {
+ logger.Info("deleting an old boot image cache DataVolume", "namespace", oldDV.Namespace, "DataVolume name", oldDV.Name)
+ err := cl.Delete(ctx, &oldDV)
+ if err != nil {
+ logger.Error(err, fmt.Sprintf("failed to delete an old DataVolume; namespace: %s, name: %s", oldDV.Namespace, oldDV.Name))
+ }
+ }
+ }
+}
+
+func (qi *cachedQCOWImage) createDVForCache(ctx context.Context, cl client.Client, nodePool *hyperv1.NodePool, infraId string) (*v1beta1.DataVolume, error) {
+ dv := qi.buildDVForCache(nodePool, infraId)
+
+ err := cl.Create(ctx, dv)
+ if err != nil && !errors.IsAlreadyExists(err) {
+ return nil, fmt.Errorf("failed to create a DataVolume for the boot image cache ; %w", err)
+ }
+
+ return dv, nil
+}
+
+func (qi *cachedQCOWImage) getDVSourceForVMTemplate() *v1beta1.DataVolumeSource {
+ return &v1beta1.DataVolumeSource{
+ PVC: &v1beta1.DataVolumeSourcePVC{
+ Namespace: qi.namespace,
+ Name: qi.dvName,
+ },
+ }
+}
+
+func (qi *cachedQCOWImage) buildDVForCache(nodePool *hyperv1.NodePool, infraId string) *v1beta1.DataVolume {
+ dv := &v1beta1.DataVolume{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: bootImageNamePrefix,
+ Namespace: qi.namespace,
+ Labels: map[string]string{
+ bootImageDVLabelRoleName: bootImageDVLabelRoleValue,
+ bootImageDVLabelInfraId: infraId,
+ },
+ Annotations: map[string]string{
+ bootImageDVAnnotationHash: qi.hash,
+ cdiImmediateBindingAnnotation: "true",
+ cdiDeleteAfterCompletionAnnotation: "false",
+ },
+ },
+ Spec: v1beta1.DataVolumeSpec{
+ Source: &v1beta1.DataVolumeSource{
+ HTTP: &v1beta1.DataVolumeSourceHTTP{
+ URL: qi.name,
+ },
+ },
+ Preallocation: pointer.Bool(true),
+ },
+ }
+
+ kvPlatform := nodePool.Spec.Platform.Kubevirt
+ if kvPlatform.RootVolume != nil && kvPlatform.RootVolume.Persistent != nil {
+ storageSpec := &v1beta1.StorageSpec{}
+ if kvPlatform.RootVolume.Persistent.Size != nil {
+ storageSpec.Resources = corev1.ResourceRequirements{
+ Requests: map[corev1.ResourceName]apiresource.Quantity{
+ corev1.ResourceStorage: *kvPlatform.RootVolume.Persistent.Size,
+ },
+ }
+ }
+
+ if kvPlatform.RootVolume.Persistent.StorageClass != nil {
+ storageSpec.StorageClassName = kvPlatform.RootVolume.Persistent.StorageClass
+ }
+
+ for _, am := range kvPlatform.RootVolume.Persistent.AccessModes {
+ storageSpec.AccessModes = append(storageSpec.AccessModes, corev1.PersistentVolumeAccessMode(am))
+ }
+
+ dv.Spec.Storage = storageSpec
+ }
+
+ return dv
+}
+
+func getCacheDVSelector(infraId string) client.MatchingLabels {
+ return map[string]string{
+ bootImageDVLabelRoleName: bootImageDVLabelRoleValue,
+ bootImageDVLabelInfraId: infraId,
+ }
+}
+
+func getCacheDVs(ctx context.Context, cl client.Client, infraId string, namespace string) ([]v1beta1.DataVolume, error) {
+ dvs := &v1beta1.DataVolumeList{}
+
+ err := cl.List(ctx, dvs, client.InNamespace(namespace), getCacheDVSelector(infraId))
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to read DataVolumes; %w", err)
+ }
+
+ return dvs.Items, nil
+}
diff --git a/hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go b/hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go
index 0df9706f823..fe591f88764 100644
--- a/hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go
+++ b/hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go
@@ -2,49 +2,65 @@ package kubevirt
import (
"fmt"
- "strings"
- hyperv1 "github.com/openshift/hypershift/api/v1beta1"
- "github.com/openshift/hypershift/support/releaseinfo"
corev1 "k8s.io/api/core/v1"
apiresource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubevirtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
+
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
+ "github.com/openshift/hypershift/support/releaseinfo"
)
-func defaultImage(releaseImage *releaseinfo.ReleaseImage) (string, error) {
+func defaultImage(releaseImage *releaseinfo.ReleaseImage) (string, string, error) {
arch, foundArch := releaseImage.StreamMetadata.Architectures["x86_64"]
if !foundArch {
- return "", fmt.Errorf("couldn't find OS metadata for architecture %q", "x64_64")
+ return "", "", fmt.Errorf("couldn't find OS metadata for architecture %q", "x64_64")
}
openStack, exists := arch.Artifacts["openstack"]
if !exists {
- return "", fmt.Errorf("couldn't find OS metadata for openstack")
+ return "", "", fmt.Errorf("couldn't find OS metadata for openstack")
}
artifact, exists := openStack.Formats["qcow2.gz"]
if !exists {
- return "", fmt.Errorf("couldn't find OS metadata for openstack qcow2.gz")
+ return "", "", fmt.Errorf("couldn't find OS metadata for openstack qcow2.gz")
}
disk, exists := artifact["disk"]
if !exists {
- return "", fmt.Errorf("couldn't find OS metadata for the openstack qcow2.gz disk")
+ return "", "", fmt.Errorf("couldn't find OS metadata for the openstack qcow2.gz disk")
}
- return disk.Location, nil
+ return disk.Location, disk.SHA256, nil
}
-func GetImage(nodePool *hyperv1.NodePool, releaseImage *releaseinfo.ReleaseImage) (string, error) {
- if nodePool.Spec.Platform.Kubevirt != nil &&
- nodePool.Spec.Platform.Kubevirt.RootVolume != nil &&
- nodePool.Spec.Platform.Kubevirt.RootVolume.Image != nil &&
- nodePool.Spec.Platform.Kubevirt.RootVolume.Image.ContainerDiskImage != nil {
+func GetImage(nodePool *hyperv1.NodePool, releaseImage *releaseinfo.ReleaseImage, hostedNamespace string) (BootImage, error) {
+ var rootVolume *hyperv1.KubevirtRootVolume
+ if nodePool.Spec.Platform.Kubevirt != nil {
+ rootVolume = nodePool.Spec.Platform.Kubevirt.RootVolume
+ }
+
+ if rootVolume != nil &&
+ rootVolume.Image != nil &&
+ rootVolume.Image.ContainerDiskImage != nil {
+
+ imageName := *nodePool.Spec.Platform.Kubevirt.RootVolume.Image.ContainerDiskImage
+
+ return newContainerBootImage(imageName), nil
+ }
+
+ imageName, imageHash, err := defaultImage(releaseImage)
+ if err != nil {
+ return nil, err
+ }
- return fmt.Sprintf("docker://%s", *nodePool.Spec.Platform.Kubevirt.RootVolume.Image.ContainerDiskImage), nil
+ // KubeVirt Caching is disabled by default
+ if rootVolume != nil && rootVolume.CacheStrategy != nil && rootVolume.CacheStrategy.Type == hyperv1.KubevirtCachingStrategyPVC {
+ return newCachedQCOWBootImage(imageName, imageHash, hostedNamespace), nil
}
- return defaultImage(releaseImage)
+ return newQCOWBootImage(imageName), nil
}
func PlatformValidation(nodePool *hyperv1.NodePool) error {
@@ -64,15 +80,18 @@ func PlatformValidation(nodePool *hyperv1.NodePool) error {
return nil
}
-func virtualMachineTemplateBase(image string, kvPlatform *hyperv1.KubevirtNodePoolPlatform) *capikubevirt.VirtualMachineTemplateSpec {
+func virtualMachineTemplateBase(nodePool *hyperv1.NodePool, bootImage BootImage) *capikubevirt.VirtualMachineTemplateSpec {
+ const rootVolumeName = "rhcos"
+
+ var (
+ memory apiresource.Quantity
+ cores uint32
+ )
- var memory apiresource.Quantity
- var volumeSize apiresource.Quantity
- var cores uint32
+ dvSource := bootImage.getDVSourceForVMTemplate()
- rootVolumeName := "rhcos"
runAlways := kubevirtv1.RunStrategyAlways
- pullMethod := v1beta1.RegistryPullNode
+ kvPlatform := nodePool.Spec.Platform.Kubevirt
if kvPlatform.Compute != nil {
if kvPlatform.Compute.Memory != nil {
@@ -83,16 +102,6 @@ func virtualMachineTemplateBase(image string, kvPlatform *hyperv1.KubevirtNodePo
}
}
- if kvPlatform.RootVolume != nil {
- if kvPlatform.RootVolume.Image != nil && kvPlatform.RootVolume.Image.ContainerDiskImage != nil {
- image = fmt.Sprintf("docker://%s", *kvPlatform.RootVolume.Image.ContainerDiskImage)
- }
-
- if kvPlatform.RootVolume.Persistent != nil && kvPlatform.RootVolume.Persistent.Size != nil {
- volumeSize = *kvPlatform.RootVolume.Persistent.Size
- }
- }
-
template := &capikubevirt.VirtualMachineTemplateSpec{
Spec: kubevirtv1.VirtualMachineSpec{
RunStrategy: &runAlways,
@@ -152,60 +161,41 @@ func virtualMachineTemplateBase(image string, kvPlatform *hyperv1.KubevirtNodePo
ObjectMeta: metav1.ObjectMeta{
Name: rootVolumeName,
},
+ Spec: v1beta1.DataVolumeSpec{
+ Source: dvSource,
+ },
}
- if strings.HasPrefix(image, "docker://") {
- dataVolume.Spec = v1beta1.DataVolumeSpec{
- Source: &v1beta1.DataVolumeSource{
- Registry: &v1beta1.DataVolumeSourceRegistry{
- URL: &image,
- PullMethod: &pullMethod,
- },
- },
- }
- } else {
- dataVolume.Spec = v1beta1.DataVolumeSpec{
- Source: &v1beta1.DataVolumeSource{
- HTTP: &v1beta1.DataVolumeSourceHTTP{
- URL: image,
- },
- },
- }
- }
+ if kvPlatform.RootVolume != nil {
+ if kvPlatform.RootVolume.Persistent != nil {
+ storageSpec := &v1beta1.StorageSpec{}
- dataVolume.Spec.Storage = &v1beta1.StorageSpec{
- Resources: corev1.ResourceRequirements{
- Requests: map[corev1.ResourceName]apiresource.Quantity{
- corev1.ResourceStorage: volumeSize,
- },
- },
- }
+ for _, ac := range kvPlatform.RootVolume.Persistent.AccessModes {
+ storageSpec.AccessModes = append(storageSpec.AccessModes, corev1.PersistentVolumeAccessMode(ac))
+ }
- if kvPlatform.RootVolume != nil &&
- kvPlatform.RootVolume.Persistent != nil {
- if kvPlatform.RootVolume.Persistent.StorageClass != nil {
- dataVolume.Spec.Storage.StorageClassName = kvPlatform.RootVolume.Persistent.StorageClass
- }
- if len(kvPlatform.RootVolume.Persistent.AccessModes) != 0 {
- var accessModes []corev1.PersistentVolumeAccessMode
- for _, am := range kvPlatform.RootVolume.Persistent.AccessModes {
- amv1 := corev1.PersistentVolumeAccessMode(am)
- accessModes = append(accessModes, amv1)
+ if kvPlatform.RootVolume.Persistent.Size != nil {
+ storageSpec.Resources = corev1.ResourceRequirements{
+ Requests: map[corev1.ResourceName]apiresource.Quantity{
+ corev1.ResourceStorage: *kvPlatform.RootVolume.Persistent.Size,
+ },
+ }
}
- dataVolume.Spec.Storage.AccessModes = accessModes
+
+ if kvPlatform.RootVolume.Persistent.StorageClass != nil {
+ storageSpec.StorageClassName = kvPlatform.RootVolume.Persistent.StorageClass
+ }
+
+ dataVolume.Spec.Storage = storageSpec
}
}
-
template.Spec.DataVolumeTemplates = []kubevirtv1.DataVolumeTemplateSpec{dataVolume}
return template
}
-func MachineTemplateSpec(image string, nodePool *hyperv1.NodePool, hcluster *hyperv1.HostedCluster) *capikubevirt.KubevirtMachineTemplateSpec {
- nodePoolNameLabelKey := hyperv1.NodePoolNameLabel
- infraIDLabelKey := hyperv1.InfraIDLabel
-
- vmTemplate := virtualMachineTemplateBase(image, nodePool.Spec.Platform.Kubevirt)
+func MachineTemplateSpec(nodePool *hyperv1.NodePool, bootImage BootImage, hcluster *hyperv1.HostedCluster) *capikubevirt.KubevirtMachineTemplateSpec {
+ vmTemplate := virtualMachineTemplateBase(nodePool, bootImage)
vmTemplate.Spec.Template.Spec.Affinity = &corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
@@ -216,7 +206,7 @@ func MachineTemplateSpec(image string, nodePool *hyperv1.NodePool, hcluster *hyp
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
- Key: nodePoolNameLabelKey,
+ Key: hyperv1.NodePoolNameLabel,
Operator: metav1.LabelSelectorOpIn,
Values: []string{nodePool.Name},
},
@@ -236,11 +226,11 @@ func MachineTemplateSpec(image string, nodePool *hyperv1.NodePool, hcluster *hyp
vmTemplate.Spec.Template.ObjectMeta.Labels = map[string]string{}
}
- vmTemplate.Spec.Template.ObjectMeta.Labels[nodePoolNameLabelKey] = nodePool.Name
- vmTemplate.Spec.Template.ObjectMeta.Labels[infraIDLabelKey] = hcluster.Spec.InfraID
+ vmTemplate.Spec.Template.ObjectMeta.Labels[hyperv1.NodePoolNameLabel] = nodePool.Name
+ vmTemplate.Spec.Template.ObjectMeta.Labels[hyperv1.InfraIDLabel] = hcluster.Spec.InfraID
- vmTemplate.ObjectMeta.Labels[nodePoolNameLabelKey] = nodePool.Name
- vmTemplate.ObjectMeta.Labels[infraIDLabelKey] = hcluster.Spec.InfraID
+ vmTemplate.ObjectMeta.Labels[hyperv1.NodePoolNameLabel] = nodePool.Name
+ vmTemplate.ObjectMeta.Labels[hyperv1.InfraIDLabel] = hcluster.Spec.InfraID
if hcluster.Spec.Platform.Kubevirt != nil && hcluster.Spec.Platform.Kubevirt.Credentials != nil {
vmTemplate.ObjectMeta.Namespace = hcluster.Spec.Platform.Kubevirt.Credentials.InfraNamespace
diff --git a/hypershift-operator/controllers/nodepool/kubevirt/kubevirt_test.go b/hypershift-operator/controllers/nodepool/kubevirt/kubevirt_test.go
index 9b0c29ea369..06428944fbd 100644
--- a/hypershift-operator/controllers/nodepool/kubevirt/kubevirt_test.go
+++ b/hypershift-operator/controllers/nodepool/kubevirt/kubevirt_test.go
@@ -1,18 +1,36 @@
package kubevirt
import (
+ "context"
+ "fmt"
"testing"
+ "github.com/go-logr/logr"
+ "github.com/go-logr/zapr"
"github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1beta1"
+ "go.uber.org/zap/zaptest"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/equality"
apiresource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
kubevirtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
+)
+
+const (
+ namespace = "my-ns"
+ poolName = "my-pool"
+ clusterName = "my-cluster"
+ infraId = "infraId123"
+ hostedClusterNamespace = "hostedClusterNamespace"
+ bootImageName = "https://rhcos.mirror.openshift.com/art/storage/releases/rhcos-4.12/412.86.202209302317-0/x86_64/rhcos-412.86.202209302317-0-openstack.x86_64.qcow2.gz"
+ imageHash = "imageHash"
)
func TestKubevirtMachineTemplate(t *testing.T) {
@@ -26,10 +44,11 @@ func TestKubevirtMachineTemplate(t *testing.T) {
name: "happy flow",
nodePool: &hyperv1.NodePool{
ObjectMeta: metav1.ObjectMeta{
- Name: "my-pool",
+ Name: poolName,
+ Namespace: namespace,
},
Spec: hyperv1.NodePoolSpec{
- ClusterName: "",
+ ClusterName: clusterName,
Replicas: nil,
Config: nil,
Management: hyperv1.NodePoolManagement{},
@@ -54,7 +73,7 @@ func TestKubevirtMachineTemplate(t *testing.T) {
expected: &capikubevirt.KubevirtMachineTemplateSpec{
Template: capikubevirt.KubevirtMachineTemplateResource{
Spec: capikubevirt.KubevirtMachineSpec{
- VirtualMachineTemplate: *generateNodeTemplate("5Gi", 4, "docker://testimage", "32Gi"),
+ VirtualMachineTemplate: *generateNodeTemplate("5Gi", 4, "32Gi"),
},
},
},
@@ -64,17 +83,164 @@ func TestKubevirtMachineTemplate(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
- err := PlatformValidation(tc.nodePool)
- g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(PlatformValidation(tc.nodePool)).To(Succeed())
+
+ bootImage := newCachedQCOWBootImage(bootImageName, imageHash, hostedClusterNamespace)
+ bootImage.dvName = bootImageNamePrefix + "12345"
+ result := MachineTemplateSpec(tc.nodePool, bootImage, tc.hcluster)
+ g.Expect(result).To(Equal(tc.expected), "Comparison failed\n%v", cmp.Diff(tc.expected, result))
+ })
+ }
+}
- result := MachineTemplateSpec("", tc.nodePool, tc.hcluster)
- if !equality.Semantic.DeepEqual(tc.expected, result) {
- t.Errorf(cmp.Diff(tc.expected, result))
+func TestCacheImage(t *testing.T) {
+ nodePool := &hyperv1.NodePool{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: poolName,
+ Namespace: namespace,
+ },
+ Spec: hyperv1.NodePoolSpec{
+ ClusterName: clusterName,
+ Replicas: nil,
+ Config: nil,
+ Management: hyperv1.NodePoolManagement{},
+ AutoScaling: nil,
+ Platform: hyperv1.NodePoolPlatform{
+ Type: hyperv1.KubevirtPlatform,
+ Kubevirt: generateKubevirtPlatform("5Gi", 4, "testimage", "32Gi"),
+ },
+ Release: hyperv1.Release{},
+ },
+ }
+
+ testCases := []struct {
+ name string
+ nodePool *hyperv1.NodePool
+ existingResources []client.Object
+ asserFunc func(Gomega, []v1beta1.DataVolume, string, *cachedQCOWImage)
+ errExpected bool
+ dvNamePrefix string
+ }{
+ {
+ name: "happy flow - no existing PVC",
+ nodePool: nodePool,
+ errExpected: false,
+ dvNamePrefix: bootImageNamePrefix,
+ asserFunc: assertDV,
+ },
+ {
+ name: "happy flow - PVC already exists",
+ nodePool: nodePool,
+ errExpected: false,
+ existingResources: []client.Object{
+ &v1beta1.DataVolume{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: bootImageNamePrefix,
+ Name: bootImageNamePrefix + "already-exists",
+ Namespace: hostedClusterNamespace,
+ Annotations: map[string]string{
+ bootImageDVAnnotationHash: imageHash,
+ },
+ Labels: map[string]string{
+ bootImageDVLabelRoleName: bootImageDVLabelRoleValue,
+ bootImageDVLabelInfraId: infraId,
+ },
+ },
+ },
+ },
+ dvNamePrefix: bootImageNamePrefix + "already-exists",
+ asserFunc: assertDV,
+ },
+ {
+ name: "cleanup - different hash",
+ nodePool: nodePool,
+ errExpected: false,
+ existingResources: []client.Object{
+ &v1beta1.DataVolume{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: bootImageNamePrefix,
+ Name: bootImageNamePrefix + "another-one",
+ Namespace: hostedClusterNamespace,
+ Annotations: map[string]string{
+ bootImageDVAnnotationHash: "otherImageHash",
+ bootImageDVLabelInfraId: infraId,
+ },
+ Labels: map[string]string{
+ bootImageDVLabelRoleName: bootImageDVLabelRoleValue,
+ bootImageDVLabelInfraId: infraId,
+ },
+ },
+ },
+ },
+ dvNamePrefix: bootImageNamePrefix,
+ asserFunc: assertDV,
+ },
+ {
+ name: "cleanup - different cluster - should not clean",
+ nodePool: nodePool,
+ errExpected: false,
+ existingResources: []client.Object{
+ &v1beta1.DataVolume{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: bootImageNamePrefix,
+ Name: bootImageNamePrefix + "another-one",
+ Namespace: hostedClusterNamespace,
+ Annotations: map[string]string{
+ bootImageDVAnnotationHash: imageHash,
+ bootImageDVLabelInfraId: "not-the-same-cluster",
+ },
+ Labels: map[string]string{
+ bootImageDVLabelRoleName: bootImageDVLabelRoleValue,
+ },
+ },
+ },
+ },
+ dvNamePrefix: bootImageNamePrefix,
+ asserFunc: func(g Gomega, dvs []v1beta1.DataVolume, expectedDVNamePrefix string, bootImage *cachedQCOWImage) {
+ g.ExpectWithOffset(1, dvs).Should(HaveLen(2), "should not clear the other DV")
+ for _, dv := range dvs {
+ if dv.Name != bootImageNamePrefix+"another-one" {
+ g.ExpectWithOffset(1, dv.Name).Should(HavePrefix(expectedDVNamePrefix))
+ g.ExpectWithOffset(1, bootImage.dvName).Should(Equal(dv.Name), "failed to set the dvName filed in the cacheImage object")
+ }
+ }
+ },
+ },
+ }
+
+ ctx := logr.NewContext(context.Background(), zapr.NewLogger(zaptest.NewLogger(t)))
+ for _, tc := range testCases {
+ t.Run(tc.name, func(tst *testing.T) {
+ g := NewWithT(tst)
+ scheme := runtime.NewScheme()
+ _ = corev1.AddToScheme(scheme)
+ _ = v1beta1.AddToScheme(scheme)
+ cl := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.existingResources...).Build()
+
+ bootImage := newCachedQCOWBootImage(bootImageName, imageHash, hostedClusterNamespace)
+ err := bootImage.CacheImage(ctx, cl, tc.nodePool, infraId)
+
+ if tc.errExpected != (err != nil) {
+ if tc.errExpected {
+ g.Fail("should return error, but it didn't")
+ } else {
+ g.Fail(fmt.Sprintf(`should not return error, but it did; the error is: "%v"`, err))
+ }
}
+
+ dvs := v1beta1.DataVolumeList{}
+ g.Expect(cl.List(ctx, &dvs)).To(Succeed())
+ tc.asserFunc(g, dvs.Items, tc.dvNamePrefix, bootImage)
})
}
}
+func assertDV(g Gomega, dvs []v1beta1.DataVolume, expectedDVNamePrefix string, bootImage *cachedQCOWImage) {
+ g.ExpectWithOffset(1, dvs).Should(HaveLen(1), "failed to read the DataVolume back; No matched DataVolume")
+ g.ExpectWithOffset(1, dvs[0].Name).Should(HavePrefix(expectedDVNamePrefix))
+ g.ExpectWithOffset(1, bootImage.dvName).Should(Equal(dvs[0].Name), "failed to set the dvName filed in the cacheImage object")
+}
+
func generateKubevirtPlatform(memory string, cores uint32, image string, volumeSize string) *hyperv1.KubevirtNodePoolPlatform {
memoryQuantity := apiresource.MustParse(memory)
volumeSizeQuantity := apiresource.MustParse(volumeSize)
@@ -101,19 +267,15 @@ func generateKubevirtPlatform(memory string, cores uint32, image string, volumeS
return exampleTemplate
}
-func generateNodeTemplate(memory string, cpu uint32, image string, volumeSize string) *capikubevirt.VirtualMachineTemplateSpec {
+func generateNodeTemplate(memory string, cpu uint32, volumeSize string) *capikubevirt.VirtualMachineTemplateSpec {
runAlways := kubevirtv1.RunStrategyAlways
guestQuantity := apiresource.MustParse(memory)
- volumeSizeQuantity := apiresource.MustParse(volumeSize)
- nodePoolNameLabelKey := hyperv1.NodePoolNameLabel
- infraIDLabelKey := hyperv1.InfraIDLabel
- pullMethod := v1beta1.RegistryPullNode
return &capikubevirt.VirtualMachineTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- nodePoolNameLabelKey: "my-pool",
- infraIDLabelKey: "1234",
+ hyperv1.NodePoolNameLabel: "my-pool",
+ hyperv1.InfraIDLabel: "1234",
},
},
Spec: kubevirtv1.VirtualMachineSpec{
@@ -126,15 +288,15 @@ func generateNodeTemplate(memory string, cpu uint32, image string, volumeSize st
},
Spec: v1beta1.DataVolumeSpec{
Source: &v1beta1.DataVolumeSource{
- Registry: &v1beta1.DataVolumeSourceRegistry{
- URL: &image,
- PullMethod: &pullMethod,
+ PVC: &v1beta1.DataVolumeSourcePVC{
+ Namespace: hostedClusterNamespace,
+ Name: bootImageNamePrefix + "12345",
},
},
Storage: &v1beta1.StorageSpec{
Resources: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]apiresource.Quantity{
- corev1.ResourceStorage: volumeSizeQuantity,
+ corev1.ResourceStorage: apiresource.MustParse(volumeSize),
},
},
},
@@ -144,8 +306,8 @@ func generateNodeTemplate(memory string, cpu uint32, image string, volumeSize st
Template: &kubevirtv1.VirtualMachineInstanceTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- nodePoolNameLabelKey: "my-pool",
- infraIDLabelKey: "1234",
+ hyperv1.NodePoolNameLabel: "my-pool",
+ hyperv1.InfraIDLabel: "1234",
},
},
Spec: kubevirtv1.VirtualMachineInstanceSpec{
@@ -159,7 +321,7 @@ func generateNodeTemplate(memory string, cpu uint32, image string, volumeSize st
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
- Key: nodePoolNameLabelKey,
+ Key: hyperv1.NodePoolNameLabel,
Operator: metav1.LabelSelectorOpIn,
Values: []string{"my-pool"},
},
diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller.go b/hypershift-operator/controllers/nodepool/nodepool_controller.go
index ae0d634b738..ac1aa26b025 100644
--- a/hypershift-operator/controllers/nodepool/nodepool_controller.go
+++ b/hypershift-operator/controllers/nodepool/nodepool_controller.go
@@ -300,6 +300,63 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
ObservedGeneration: nodePool.Generation,
})
+ // Validate and get releaseImage.
+ releaseImage, err := r.getReleaseImage(ctx, hcluster, nodePool.Status.Version, nodePool.Spec.Release.Image)
+ if err != nil {
+ SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
+ Type: hyperv1.NodePoolValidReleaseImageConditionType,
+ Status: corev1.ConditionFalse,
+ Reason: hyperv1.NodePoolValidationFailedReason,
+ Message: fmt.Sprintf("Failed to get release image: %v", err.Error()),
+ ObservedGeneration: nodePool.Generation,
+ })
+ return ctrl.Result{}, fmt.Errorf("failed to look up release image metadata: %w", err)
+ }
+
+ var kubevirtBootImage kubevirt.BootImage
+ // moved KubeVirt specific handling up here, so the caching of the boot image will start as early as possible
+ // in order to actually save time. Caching from the original location will take more time, because the VMs can't
+ // be created before the caching is 100% done. But moving this logic here, the caching will be done in parallel
+ // to the ignition settings, and so it will be ready, or almost ready, when the VMs are created.
+ if nodePool.Spec.Platform.Type == hyperv1.KubevirtPlatform {
+ if err := kubevirt.PlatformValidation(nodePool); err != nil {
+ SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
+ Type: hyperv1.NodePoolValidMachineConfigConditionType,
+ Status: corev1.ConditionFalse,
+ Reason: hyperv1.NodePoolValidationFailedReason,
+ Message: fmt.Sprintf("validation of NodePool KubeVirt platform failed: %s", err.Error()),
+ ObservedGeneration: nodePool.Generation,
+ })
+ return ctrl.Result{}, fmt.Errorf("validation of NodePool KubeVirt platform failed: %w", err)
+ }
+ removeStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolValidMachineConfigConditionType)
+
+ kubevirtBootImage, err = kubevirt.GetImage(nodePool, releaseImage, controlPlaneNamespace)
+ if err != nil {
+ SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
+ Type: hyperv1.NodePoolValidPlatformImageType,
+ Status: corev1.ConditionFalse,
+ Reason: hyperv1.NodePoolValidationFailedReason,
+ Message: fmt.Sprintf("Couldn't discover a KubeVirt Image for release image %q: %s", nodePool.Spec.Release.Image, err.Error()),
+ ObservedGeneration: nodePool.Generation,
+ })
+ return ctrl.Result{}, fmt.Errorf("couldn't discover a KubeVirt Image in release payload image: %w", err)
+ }
+
+ SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
+ Type: hyperv1.NodePoolValidPlatformImageType,
+ Status: corev1.ConditionTrue,
+ Reason: hyperv1.AsExpectedReason,
+ Message: fmt.Sprintf("Bootstrap KubeVirt Image is %q", kubevirtBootImage),
+ ObservedGeneration: nodePool.Generation,
+ })
+
+ err = kubevirtBootImage.CacheImage(ctx, r.Client, nodePool, hcluster.Spec.InfraID)
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to create or validate KubeVirt image cache: %w", err)
+ }
+ }
+
// Validate IgnitionEndpoint.
if ignEndpoint == "" {
SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
@@ -348,17 +405,6 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
removeStatusCondition(&nodePool.Status.Conditions, string(hyperv1.IgnitionEndpointAvailable))
// Validate and get releaseImage.
- releaseImage, err := r.getReleaseImage(ctx, hcluster, nodePool.Status.Version, nodePool.Spec.Release.Image)
- if err != nil {
- SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidReleaseImageConditionType,
- Status: corev1.ConditionFalse,
- Reason: hyperv1.NodePoolValidationFailedReason,
- Message: fmt.Sprintf("Failed to get release image: %v", err.Error()),
- ObservedGeneration: nodePool.Generation,
- })
- return ctrl.Result{}, fmt.Errorf("failed to look up release image metadata: %w", err)
- }
SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
Type: hyperv1.NodePoolValidReleaseImageConditionType,
Status: corev1.ConditionTrue,
@@ -445,41 +491,6 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
})
}
- // Validate KubeVirt platform specific input
- var kubevirtBootImage string
- if nodePool.Spec.Platform.Type == hyperv1.KubevirtPlatform {
- if err := kubevirt.PlatformValidation(nodePool); err != nil {
- SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidMachineConfigConditionType,
- Status: corev1.ConditionFalse,
- Reason: hyperv1.NodePoolValidationFailedReason,
- Message: fmt.Sprintf("validation of NodePool KubeVirt platform failed: %s", err.Error()),
- ObservedGeneration: nodePool.Generation,
- })
- return ctrl.Result{}, fmt.Errorf("validation of NodePool KubeVirt platform failed: %w", err)
- }
- removeStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolValidMachineConfigConditionType)
-
- kubevirtBootImage, err = kubevirt.GetImage(nodePool, releaseImage)
- if err != nil {
- SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidPlatformImageType,
- Status: corev1.ConditionFalse,
- Reason: hyperv1.NodePoolValidationFailedReason,
- Message: fmt.Sprintf("Couldn't discover a KubeVirt Image for release image %q: %s", nodePool.Spec.Release.Image, err.Error()),
- ObservedGeneration: nodePool.Generation,
- })
- return ctrl.Result{}, fmt.Errorf("couldn't discover a KubeVirt Image in release payload image: %w", err)
- }
- SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidPlatformImageType,
- Status: corev1.ConditionTrue,
- Reason: hyperv1.AsExpectedReason,
- Message: fmt.Sprintf("Bootstrap KubeVirt Image is %q", kubevirtBootImage),
- ObservedGeneration: nodePool.Generation,
- })
- }
-
// Validate config input.
// 3 generic core config resources: fips, ssh and haproxy.
// TODO (alberto): consider moving the expectedCoreConfigResources check
@@ -2244,7 +2255,7 @@ func isPlatformNone(nodePool *hyperv1.NodePool) bool {
// a func to mutate the (platform)MachineTemplate.spec, a json string representation for (platform)MachineTemplate.spec
// and an error.
func machineTemplateBuilders(hcluster *hyperv1.HostedCluster, nodePool *hyperv1.NodePool,
- infraID, ami, powervsBootImage, kubevirtBootImage string, defaultSG bool) (client.Object, func(object client.Object) error, string, error) {
+ infraID, ami, powervsBootImage string, kubevirtBootImage kubevirt.BootImage, defaultSG bool) (client.Object, func(object client.Object) error, string, error) {
var mutateTemplate func(object client.Object) error
var template client.Object
var machineTemplateSpec interface{}
@@ -2281,7 +2292,7 @@ func machineTemplateBuilders(hcluster *hyperv1.HostedCluster, nodePool *hyperv1.
}
case hyperv1.KubevirtPlatform:
template = &capikubevirt.KubevirtMachineTemplate{}
- machineTemplateSpec = kubevirt.MachineTemplateSpec(kubevirtBootImage, nodePool, hcluster)
+ machineTemplateSpec = kubevirt.MachineTemplateSpec(nodePool, kubevirtBootImage, hcluster)
mutateTemplate = func(object client.Object) error {
o, _ := object.(*capikubevirt.KubevirtMachineTemplate)
o.Spec = *machineTemplateSpec.(*capikubevirt.KubevirtMachineTemplateSpec)
diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller_test.go b/hypershift-operator/controllers/nodepool/nodepool_controller_test.go
index 2f425228adf..8ef642eb2c9 100644
--- a/hypershift-operator/controllers/nodepool/nodepool_controller_test.go
+++ b/hypershift-operator/controllers/nodepool/nodepool_controller_test.go
@@ -1486,7 +1486,7 @@ func RunTestMachineTemplateBuilders(t *testing.T, preCreateMachineTemplate bool)
expectedMachineTemplateSpecJSON, err := json.Marshal(expectedMachineTemplate.Spec)
g.Expect(err).ToNot(HaveOccurred())
- template, mutateTemplate, machineTemplateSpecJSON, err := machineTemplateBuilders(hcluster, nodePool, infraID, ami, "", "", true)
+ template, mutateTemplate, machineTemplateSpecJSON, err := machineTemplateBuilders(hcluster, nodePool, infraID, ami, "", nil, true)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(machineTemplateSpecJSON).To(BeIdenticalTo(string(expectedMachineTemplateSpecJSON)))
@@ -1497,8 +1497,7 @@ func RunTestMachineTemplateBuilders(t *testing.T, preCreateMachineTemplate bool)
g.Expect(err).ToNot(HaveOccurred())
gotMachineTemplate := &capiaws.AWSMachineTemplate{}
- r.Client.Get(context.Background(), client.ObjectKeyFromObject(expectedMachineTemplate), gotMachineTemplate)
- g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(r.Client.Get(context.Background(), client.ObjectKeyFromObject(expectedMachineTemplate), gotMachineTemplate)).To(Succeed())
g.Expect(expectedMachineTemplate.Spec).To(BeEquivalentTo(gotMachineTemplate.Spec))
g.Expect(expectedMachineTemplate.ObjectMeta.Annotations).To(BeEquivalentTo(gotMachineTemplate.ObjectMeta.Annotations))
}
diff --git a/test/e2e/util/util.go b/test/e2e/util/util.go
index aad0d907f5a..32c5e6b7286 100644
--- a/test/e2e/util/util.go
+++ b/test/e2e/util/util.go
@@ -175,6 +175,8 @@ func WaitForNReadyNodes(t *testing.T, ctx context.Context, client crclient.Clien
switch platform {
case hyperv1.PowerVSPlatform:
waitTimeout = 60 * time.Minute
+ case hyperv1.KubevirtPlatform:
+ waitTimeout = 60 * time.Minute
}
t.Logf("Waiting for nodes to become ready. Want: %v", n)
From 950a40ffc9ed7e29850423fb434d9e1cc22b9a65 Mon Sep 17 00:00:00 2001
From: Nahshon Unna-Tsameret
Date: Sun, 30 Apr 2023 16:43:55 +0300
Subject: [PATCH 2/4] KV cache: support external infra
Signed-off-by: Nahshon Unna-Tsameret
---
api/fixtures/example_kubevirt.go | 1 -
api/v1alpha1/nodepool_types.go | 21 ++++
api/v1alpha1/zz_generated.deepcopy.go | 40 ++++++
api/v1beta1/nodepool_types.go | 23 +++-
api/v1beta1/zz_generated.deepcopy.go | 40 ++++++
.../hypershift.openshift.io_nodepools.yaml | 34 ++++-
docs/content/reference/api.md | 89 ++++++++++++-
hack/app-sre/saas_template.yaml | 34 ++++-
.../hostedcluster/hostedcluster_controller.go | 77 ++----------
.../hostedcluster_controller_test.go | 31 ++++-
.../nodepool/kubevirt/imagecacher.go | 78 ++++++++++--
.../nodepool/kubevirt/kubevirt_test.go | 8 +-
.../nodepool/nodepool_controller.go | 67 +++++++++-
hypershift-operator/main.go | 3 +
kubevirtexternalinfra/externalinfra.go | 117 ++++++++++++++++++
15 files changed, 565 insertions(+), 98 deletions(-)
create mode 100644 kubevirtexternalinfra/externalinfra.go
diff --git a/api/fixtures/example_kubevirt.go b/api/fixtures/example_kubevirt.go
index f80840694d6..e3f7daeabc5 100644
--- a/api/fixtures/example_kubevirt.go
+++ b/api/fixtures/example_kubevirt.go
@@ -20,7 +20,6 @@ type ExampleKubevirtOptions struct {
BaseDomainPassthrough bool
InfraKubeConfig []byte
InfraNamespace string
- CacheBootImage bool
CacheStrategyType string
}
diff --git a/api/v1alpha1/nodepool_types.go b/api/v1alpha1/nodepool_types.go
index ef58ca4c9d4..a55a06d2368 100644
--- a/api/v1alpha1/nodepool_types.go
+++ b/api/v1alpha1/nodepool_types.go
@@ -184,6 +184,9 @@ type NodePoolStatus struct {
// +kubebuilder:validation:Optional
Version string `json:"version,omitempty"`
+ // Platform holds the specific statuses
+ Platform *NodePoolPlatformStatus `json:"platform,omitempty"`
+
// Conditions represents the latest available observations of the node pool's
// current state.
// +optional
@@ -814,6 +817,24 @@ type NodePoolCondition struct {
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}
+// NodePoolPlatformStatus contains specific platform statuses
+type NodePoolPlatformStatus struct {
+ // KubeVirt contains the KubeVirt platform statuses
+ // +optional
+ KubeVirt *KubeVirtNodePoolStatus `json:"kubeVirt,omitempty"`
+}
+
+// KubeVirtNodePoolStatus contains the KubeVirt platform statuses
+type KubeVirtNodePoolStatus struct {
+ // CacheName holds the name of the cache DataVolume, if exists
+ // +optional
+ CacheName string `json:"cacheName,omitempty"`
+
+ // RemoteNamespace holds the namespace of the remote infra cluster, if defined
+ // +optional
+ RemoteNamespace string `json:"remoteNamespace,omitempty"`
+}
+
// Taint is as v1 Core but without TimeAdded.
// https://github.com/kubernetes/kubernetes/blob/ed8cad1e80d096257921908a52ac69cf1f41a098/staging/src/k8s.io/api/core/v1/types.go#L3037-L3053
type Taint struct {
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index d9ff2d77f40..2edc4d8334d 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -1362,6 +1362,21 @@ func (in *KMSSpec) DeepCopy() *KMSSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeVirtNodePoolStatus) DeepCopyInto(out *KubeVirtNodePoolStatus) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtNodePoolStatus.
+func (in *KubeVirtNodePoolStatus) DeepCopy() *KubeVirtNodePoolStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeVirtNodePoolStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeconfigSecretRef) DeepCopyInto(out *KubeconfigSecretRef) {
*out = *in
@@ -1815,6 +1830,26 @@ func (in *NodePoolPlatform) DeepCopy() *NodePoolPlatform {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolPlatformStatus) DeepCopyInto(out *NodePoolPlatformStatus) {
+ *out = *in
+ if in.KubeVirt != nil {
+ in, out := &in.KubeVirt, &out.KubeVirt
+ *out = new(KubeVirtNodePoolStatus)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolPlatformStatus.
+func (in *NodePoolPlatformStatus) DeepCopy() *NodePoolPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePoolPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodePoolSpec) DeepCopyInto(out *NodePoolSpec) {
*out = *in
@@ -1883,6 +1918,11 @@ func (in *NodePoolSpec) DeepCopy() *NodePoolSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodePoolStatus) DeepCopyInto(out *NodePoolStatus) {
*out = *in
+ if in.Platform != nil {
+ in, out := &in.Platform, &out.Platform
+ *out = new(NodePoolPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]NodePoolCondition, len(*in))
diff --git a/api/v1beta1/nodepool_types.go b/api/v1beta1/nodepool_types.go
index 0a849aaa326..6d883d52b01 100644
--- a/api/v1beta1/nodepool_types.go
+++ b/api/v1beta1/nodepool_types.go
@@ -158,6 +158,9 @@ type NodePoolStatus struct {
// +kubebuilder:validation:Optional
Version string `json:"version,omitempty"`
+ // Platform holds the specific statuses
+ Platform *NodePoolPlatformStatus `json:"platform,omitempty"`
+
// Conditions represents the latest available observations of the node pool's
// current state.
// +optional
@@ -578,7 +581,7 @@ type KubevirtRootVolume struct {
// CacheStrategy defines the boot image caching strategy. Default - no caching
// +optional
- CacheStrategy *KubevirtCachingStrategy `json:"CacheStrategy,omitempty"`
+ CacheStrategy *KubevirtCachingStrategy `json:"cacheStrategy,omitempty"`
}
// KubevirtVolumeType is a specific supported KubeVirt volumes
@@ -806,6 +809,24 @@ type NodePoolCondition struct {
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}
+// NodePoolPlatformStatus contains specific platform statuses
+type NodePoolPlatformStatus struct {
+ // KubeVirt contains the KubeVirt platform statuses
+ // +optional
+ KubeVirt *KubeVirtNodePoolStatus `json:"kubeVirt,omitempty"`
+}
+
+// KubeVirtNodePoolStatus contains the KubeVirt platform statuses
+type KubeVirtNodePoolStatus struct {
+ // CacheName holds the name of the cache DataVolume, if exists
+ // +optional
+ CacheName string `json:"cacheName,omitempty"`
+
+ // RemoteNamespace holds the namespace of the remote infra cluster, if defined
+ // +optional
+ RemoteNamespace string `json:"remoteNamespace,omitempty"`
+}
+
// Taint is as v1 Core but without TimeAdded.
// https://github.com/kubernetes/kubernetes/blob/ed8cad1e80d096257921908a52ac69cf1f41a098/staging/src/k8s.io/api/core/v1/types.go#L3037-L3053
type Taint struct {
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index 07027ca6bdf..d74d2321d1b 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -1321,6 +1321,21 @@ func (in *KMSSpec) DeepCopy() *KMSSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeVirtNodePoolStatus) DeepCopyInto(out *KubeVirtNodePoolStatus) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtNodePoolStatus.
+func (in *KubeVirtNodePoolStatus) DeepCopy() *KubeVirtNodePoolStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeVirtNodePoolStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeconfigSecretRef) DeepCopyInto(out *KubeconfigSecretRef) {
*out = *in
@@ -1774,6 +1789,26 @@ func (in *NodePoolPlatform) DeepCopy() *NodePoolPlatform {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolPlatformStatus) DeepCopyInto(out *NodePoolPlatformStatus) {
+ *out = *in
+ if in.KubeVirt != nil {
+ in, out := &in.KubeVirt, &out.KubeVirt
+ *out = new(KubeVirtNodePoolStatus)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolPlatformStatus.
+func (in *NodePoolPlatformStatus) DeepCopy() *NodePoolPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePoolPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodePoolSpec) DeepCopyInto(out *NodePoolSpec) {
*out = *in
@@ -1837,6 +1872,11 @@ func (in *NodePoolSpec) DeepCopy() *NodePoolSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodePoolStatus) DeepCopyInto(out *NodePoolStatus) {
*out = *in
+ if in.Platform != nil {
+ in, out := &in.Platform, &out.Platform
+ *out = new(NodePoolPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]NodePoolCondition, len(*in))
diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
index 4cef58f8c84..dd66ab0675a 100644
--- a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
+++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
@@ -855,6 +855,22 @@ spec:
- type
type: object
type: array
+ platform:
description: Platform holds the specific statuses
+ properties:
+ kubeVirt:
+ description: KubeVirt contains the KubeVirt platform statuses
+ properties:
+ cacheName:
+ description: CacheName holds the name of the cache DataVolume,
+ if exists
+ type: string
+ remoteNamespace:
+ description: RemoteNamespace holds the namespace of the remote
+ infra cluster, if defined
+ type: string
+ type: object
+ type: object
replicas:
description: Replicas is the latest observed number of nodes in the
pool.
@@ -1385,7 +1401,7 @@ spec:
description: RootVolume represents values associated with
the VM volume that will host rhcos
properties:
- CacheStrategy:
+ cacheStrategy:
description: CacheStrategy defines the boot image caching
strategy. Default - no caching
properties:
@@ -1699,6 +1715,22 @@ spec:
- type
type: object
type: array
+ platform:
+ description: Platform holds the specific statuses
+ properties:
+ kubeVirt:
+ description: KubeVirt contains the KubeVirt platform statuses
+ properties:
+ cacheName:
+ description: CacheName holds the name of the cache DataVolume,
+ if exists
+ type: string
+ remoteNamespace:
+ description: RemoteNamespace holds the namespace of the remote
+ infra cluster, if defined
+ type: string
+ type: object
+ type: object
replicas:
description: Replicas is the latest observed number of nodes in the
pool.
diff --git a/docs/content/reference/api.md b/docs/content/reference/api.md
index 0463145bc3c..5cb2c931d39 100644
--- a/docs/content/reference/api.md
+++ b/docs/content/reference/api.md
@@ -4732,6 +4732,48 @@ AWSKMSSpec
+###KubeVirtNodePoolStatus { #hypershift.openshift.io/v1beta1.KubeVirtNodePoolStatus }
+
+(Appears on:
+NodePoolPlatformStatus)
+
+
+
KubeVirtNodePoolStatus contains the KubeVirt platform statuses
+
+
+
+
+| Field |
+Description |
+
+
+
+
+
+cacheName
+
+string
+
+ |
+
+(Optional)
+ CacheName holds the name of the cache DataVolume, if exists
+ |
+
+
+
+remoteNamespace
+
+string
+
+ |
+
+(Optional)
+ RemoteNamespace holds the namespace of the remote infra cluster, if defined
+ |
+
+
+
###KubevirtCachingStrategy { #hypershift.openshift.io/v1beta1.KubevirtCachingStrategy }
(Appears on:
@@ -5136,7 +5178,7 @@ KubevirtVolume
-CacheStrategy
+cacheStrategy
KubevirtCachingStrategy
@@ -5760,6 +5802,38 @@ PowerVSNodePoolPlatform
|
+###NodePoolPlatformStatus { #hypershift.openshift.io/v1beta1.NodePoolPlatformStatus }
+
+(Appears on:
+NodePoolStatus)
+
+
+
NodePoolPlatformStatus contains specific platform statuses
+
+
+
+
+| Field |
+Description |
+
+
+
+
+
+kubeVirt
+
+
+KubeVirtNodePoolStatus
+
+
+ |
+
+(Optional)
+ KubeVirt contains the KubeVirt platform statuses
+ |
+
+
+
###NodePoolSpec { #hypershift.openshift.io/v1beta1.NodePoolSpec }
(Appears on:
@@ -6004,6 +6078,19 @@ the NodePool.
+platform
+
+
+NodePoolPlatformStatus
+
+
+ |
+
+ Platform holds the specific statuses
+ |
+
+
+
conditions
diff --git a/hack/app-sre/saas_template.yaml b/hack/app-sre/saas_template.yaml
index 9ea87dd8367..eef1133e6e8 100644
--- a/hack/app-sre/saas_template.yaml
+++ b/hack/app-sre/saas_template.yaml
@@ -43177,6 +43177,22 @@ objects:
- type
type: object
type: array
+ platform:
+ description: Platform holds the specific statuses
+ properties:
+ kubeVirt:
+ description: KubeVirt contains the KubeVirt platform statuses
+ properties:
+ cacheName:
+ description: CacheName holds the name of the cache DataVolume,
+ if exists
+ type: string
+ remoteNamespace:
+ description: RemoteNamespace holds the namespace of the
+ remote infra cluster, if defined
+ type: string
+ type: object
+ type: object
replicas:
description: Replicas is the latest observed number of nodes in
the pool.
@@ -43714,7 +43730,7 @@ objects:
description: RootVolume represents values associated with
the VM volume that will host rhcos
properties:
- CacheStrategy:
+ cacheStrategy:
description: CacheStrategy defines the boot image caching
strategy. Default - no caching
properties:
@@ -44030,6 +44046,22 @@ objects:
- type
type: object
type: array
+ platform:
+ description: Platform holds the specific statuses
+ properties:
+ kubeVirt:
+ description: KubeVirt contains the KubeVirt platform statuses
+ properties:
+ cacheName:
+ description: CacheName holds the name of the cache DataVolume,
+ if exists
+ type: string
+ remoteNamespace:
+ description: RemoteNamespace holds the namespace of the
+ remote infra cluster, if defined
+ type: string
+ type: object
+ type: object
replicas:
description: Replicas is the latest observed number of nodes in
the pool.
diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go
index 881163eba07..e3555ed898b 100644
--- a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go
+++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go
@@ -24,11 +24,8 @@ import (
"reflect"
"sort"
"strings"
- "sync"
"time"
- "k8s.io/client-go/tools/clientcmd"
-
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/aws/aws-sdk-go/aws"
@@ -55,6 +52,7 @@ import (
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/egressfirewall"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/ignitionserver"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/networkpolicy"
+ kvinfra "github.com/openshift/hypershift/kubevirtexternalinfra"
"github.com/openshift/hypershift/support/capabilities"
"github.com/openshift/hypershift/support/certs"
"github.com/openshift/hypershift/support/config"
@@ -126,11 +124,6 @@ const (
// NoopReconcile is just a default mutation function that does nothing.
var NoopReconcile controllerutil.MutateFn = func() error { return nil }
-type kubevirtInfraClient struct {
- client.Client
- namespace string
-}
-
// HostedClusterReconciler reconciles a HostedCluster object
type HostedClusterReconciler struct {
client.Client
@@ -172,7 +165,7 @@ type HostedClusterReconciler struct {
overwriteReconcile func(ctx context.Context, req ctrl.Request, log logr.Logger, hcluster *hyperv1.HostedCluster) (ctrl.Result, error)
now func() metav1.Time
- kubevirtInfraClients sync.Map
+ KubevirtInfraClients kvinfra.KubevirtInfraClientMap
}
// +kubebuilder:rbac:groups=hypershift.openshift.io,resources=hostedclusters,verbs=get;list;watch;create;update;patch;delete
@@ -3287,7 +3280,7 @@ func (r *HostedClusterReconciler) delete(ctx context.Context, hc *hyperv1.Hosted
return false, fmt.Errorf("failed to clean up OIDC bucket data: %w", err)
}
- r.kubevirtInfraClients.Delete(hc.Spec.InfraID)
+ r.KubevirtInfraClients.Delete(hc.Spec.InfraID)
// Block until the namespace is deleted, so that if a hostedcluster is deleted and then re-created with the same name
// we don't error initially because we can not create new content in a namespace that is being deleted.
@@ -3485,11 +3478,11 @@ func (r *HostedClusterReconciler) reconcileNetworkPolicies(ctx context.Context,
}); err != nil {
return fmt.Errorf("failed to reconcile virt launcher policy: %w", err)
}
- kvInfraCluster, err := r.discoverKubevirtClusterClient(ctx, hcluster, hcp.Namespace)
+ kvInfraCluster, err := r.KubevirtInfraClients.DiscoverKubevirtClusterClient(ctx, r.Client, hcluster.Spec.InfraID, hcluster, hcp.Namespace)
if err != nil {
return err
}
- egressFirewall := egressfirewall.VirtLauncherEgressFirewall(kvInfraCluster.namespace)
+ egressFirewall := egressfirewall.VirtLauncherEgressFirewall(kvInfraCluster.Namespace)
if _, err := createOrUpdate(ctx, kvInfraCluster.Client, egressFirewall, func() error {
return reconcileVirtLauncherEgressFirewall(egressFirewall)
}); err != nil {
@@ -4624,12 +4617,12 @@ func (r *HostedClusterReconciler) reconcileKubevirtPlatformDefaultSettings(ctx c
// auto generate the basedomain by retrieving the default ingress *.apps dns.
if hc.Spec.Platform.Kubevirt.BaseDomainPassthrough != nil && *hc.Spec.Platform.Kubevirt.BaseDomainPassthrough {
if hc.Spec.DNS.BaseDomain == "" {
- kvInfraCluster, err := r.discoverKubevirtClusterClient(ctx, hc, hc.Namespace)
+ kvInfraCluster, err := r.KubevirtInfraClients.DiscoverKubevirtClusterClient(ctx, r.Client, hc.Spec.InfraID, hc, hc.Namespace)
if err != nil {
return err
}
// kubevirtInfraTempRoute is used to resolve the base domain of the infra cluster without accessing IngressController
- kubevirtInfraTempRoute := manifests.KubevirtInfraTempRoute(kvInfraCluster.namespace)
+ kubevirtInfraTempRoute := manifests.KubevirtInfraTempRoute(kvInfraCluster.Namespace)
createOrUpdateProvider := upsert.New(r.EnableCIDebugOutput)
_, err = createOrUpdateProvider.CreateOrUpdate(ctx, kvInfraCluster.Client, kubevirtInfraTempRoute, func() error {
@@ -4731,62 +4724,6 @@ func (r *HostedClusterReconciler) dereferenceAWSRoles(ctx context.Context, roles
return nil
}
-func generateKubevirtInfraClusterClient(ctx context.Context, cpClient client.Client, hc hyperv1.HostedCluster) (client.Client, error) {
- infraClusterSecretRef := hc.Spec.Platform.Kubevirt.Credentials.InfraKubeConfigSecret
-
- infraKubeconfigSecret := &corev1.Secret{}
- secretNamespace := hc.Namespace
-
- infraKubeconfigSecretKey := client.ObjectKey{Namespace: secretNamespace, Name: infraClusterSecretRef.Name}
- if err := cpClient.Get(ctx, infraKubeconfigSecretKey, infraKubeconfigSecret); err != nil {
- return nil, fmt.Errorf("failed to fetch infra kubeconfig secret %s/%s: %w", secretNamespace, infraClusterSecretRef.Name, err)
- }
-
- kubeConfig, ok := infraKubeconfigSecret.Data["kubeconfig"]
- if !ok {
- return nil, errors.New("failed to retrieve infra kubeconfig from secret: 'kubeconfig' key is missing")
- }
-
- clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfig)
- if err != nil {
- return nil, fmt.Errorf("failed to create K8s-API client config: %w", err)
- }
-
- restConfig, err := clientConfig.ClientConfig()
- if err != nil {
- return nil, fmt.Errorf("failed to create REST config: %w", err)
- }
- var infraClusterClient client.Client
-
- infraClusterClient, err = client.New(restConfig, client.Options{Scheme: cpClient.Scheme()})
- if err != nil {
- return nil, fmt.Errorf("failed to create infra cluster client: %w", err)
- }
-
- return infraClusterClient, nil
-}
-
-func (r *HostedClusterReconciler) discoverKubevirtClusterClient(ctx context.Context, hc *hyperv1.HostedCluster, localInfraNamespace string) (*kubevirtInfraClient, error) {
- cluster := &kubevirtInfraClient{}
- if hc.Spec.Platform.Kubevirt.Credentials == nil {
- cluster.Client = r.Client
- cluster.namespace = localInfraNamespace
- return cluster, nil
- }
- loaded, ok := r.kubevirtInfraClients.Load(hc.Spec.InfraID)
- if ok {
- return loaded.(*kubevirtInfraClient), nil
- }
- targetClient, err := generateKubevirtInfraClusterClient(ctx, r.Client, *hc)
- if err != nil {
- return nil, err
- }
- cluster.Client = targetClient
- cluster.namespace = hc.Spec.Platform.Kubevirt.Credentials.InfraNamespace
- r.kubevirtInfraClients.LoadOrStore(hc.Spec.InfraID, cluster)
- return cluster, nil
-}
-
// reconcileSREMetricsConfig loads the SRE metrics configuration (avoids parsing if the content is the same)
// and ensures that it is synced to the hosted control plane
func (r *HostedClusterReconciler) reconcileSREMetricsConfig(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcp *hyperv1.HostedControlPlane) error {
diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go
index e4e18bb85ad..bede0753510 100644
--- a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go
+++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go
@@ -18,6 +18,7 @@ import (
"github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests"
"github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/controlplaneoperator"
+ kvinfra "github.com/openshift/hypershift/kubevirtexternalinfra"
"github.com/openshift/hypershift/support/capabilities"
fakecapabilities "github.com/openshift/hypershift/support/capabilities/fake"
fakereleaseprovider "github.com/openshift/hypershift/support/releaseinfo/fake"
@@ -898,10 +899,7 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) {
now: metav1.Now,
}
- r.kubevirtInfraClients.LoadOrStore("infra-id", &kubevirtInfraClient{
- namespace: "kubevirt-kubevirt",
- Client: &createTypeTrackingClient{Client: fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(objects...).Build()},
- })
+ r.KubevirtInfraClients = newKVInfraMapMock(objects)
ctrl.SetLogger(zap.New(zap.UseDevMode(true), zap.JSONEncoder(func(o *zapcore.EncoderConfig) {
o.EncodeTime = zapcore.RFC3339TimeEncoder
@@ -2272,3 +2270,28 @@ func TestHasBeenAvailable(t *testing.T) {
})
}
}
+
+type kubevirtInfraClientMapMock struct {
+ cluster *kvinfra.KubevirtInfraClient
+}
+
+func newKVInfraMapMock(objects []crclient.Object) kvinfra.KubevirtInfraClientMap {
+ return &kubevirtInfraClientMapMock{
+ cluster: &kvinfra.KubevirtInfraClient{
+ Client: &createTypeTrackingClient{Client: fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(objects...).Build()},
+ Namespace: "kubevirt-kubevirt",
+ },
+ }
+}
+
+func (k *kubevirtInfraClientMapMock) DiscoverKubevirtClusterClient(_ context.Context, _ crclient.Client, _ string, _ *hyperv1.HostedCluster, _ string) (*kvinfra.KubevirtInfraClient, error) {
+ return k.cluster, nil
+}
+
+func (k *kubevirtInfraClientMapMock) GetClient(_ string) *kvinfra.KubevirtInfraClient {
+ return k.cluster
+}
+
+func (*kubevirtInfraClientMapMock) Delete(_ string) {
+ // interface's empty implementation
+}
diff --git a/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go b/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
index 850c9e46d72..8ab8855f445 100644
--- a/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
+++ b/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
@@ -21,7 +21,7 @@ const (
bootImageDVAnnotationHash = "hypershift.openshift.io/kubevirt-boot-image-hash"
bootImageDVLabelRoleName = "hypershift.openshift.io/kubevirt-boot-image-role"
bootImageDVLabelRoleValue = "kv-boot-image-cache"
- bootImageDVLabelInfraId = "hypershift.openshift.io/infra-id"
+ bootImageDVLabelUID = "hypershift.openshift.io/nodepool-uid"
bootImageNamePrefix = bootImageDVLabelRoleValue + "-"
// A CDI annotation for DataVolume, to not wait to first customer, but start importing immediately.
@@ -36,10 +36,14 @@ const (
// reference to be used by the node templates.
type BootImage interface {
// CacheImage creates a PVC to cache the node image.
- CacheImage(ctx context.Context, cl client.Client, nodePool *hyperv1.NodePool, infraId string) error
+ CacheImage(context.Context, client.Client, *hyperv1.NodePool, string) error
getDVSourceForVMTemplate() *v1beta1.DataVolumeSource
}
+type BootImageNamer interface {
+ GetCacheName() string
+}
+
// containerImage is the implementation of the BootImage interface for container images
type containerImage struct {
name string
@@ -104,7 +108,7 @@ func newCachedQCOWBootImage(name, hash, namespace string) *cachedQCOWImage {
}
}
-func (qi *cachedQCOWImage) CacheImage(ctx context.Context, cl client.Client, nodePool *hyperv1.NodePool, infraId string) error {
+func (qi *cachedQCOWImage) CacheImage(ctx context.Context, cl client.Client, nodePool *hyperv1.NodePool, uid string) error {
logger := ctrl.LoggerFrom(ctx)
if nodePool.Spec.Platform.Kubevirt == nil {
@@ -112,7 +116,27 @@ func (qi *cachedQCOWImage) CacheImage(ctx context.Context, cl client.Client, nod
return fmt.Errorf("nodePool does not contain KubeVirt configurations")
}
- dvList, err := getCacheDVs(ctx, cl, infraId, qi.namespace)
+ if nodePool.Status.Platform != nil &&
+ nodePool.Status.Platform.KubeVirt != nil &&
+ len(nodePool.Status.Platform.KubeVirt.CacheName) > 0 {
+ var dv v1beta1.DataVolume
+ dvName := nodePool.Status.Platform.KubeVirt.CacheName
+
+ err := cl.Get(ctx, client.ObjectKey{Name: dvName, Namespace: qi.namespace}, &dv)
+ if err == nil {
+ if annotations := dv.GetAnnotations(); annotations != nil && annotations[bootImageDVAnnotationHash] == qi.hash {
+ qi.dvName = dvName
+ return nil
+ }
+ } else {
+ if !errors.IsNotFound(err) {
+ return fmt.Errorf("can't read DataVolume %s/%s: %w", qi.namespace, dvName, err)
+ }
+ // cache DV not found - should keep searching, or, if missing, create it
+ }
+ }
+
+ dvList, err := getCacheDVs(ctx, cl, uid, qi.namespace)
if err != nil {
return err
}
@@ -130,7 +154,7 @@ func (qi *cachedQCOWImage) CacheImage(ctx context.Context, cl client.Client, nod
// if no DV with the required hash was found
if len(dvName) == 0 {
logger.Info("couldn't find boot image cache DataVolume; creating it...")
- dv, err := qi.createDVForCache(ctx, cl, nodePool, infraId)
+ dv, err := qi.createDVForCache(ctx, cl, nodePool, uid)
if err != nil {
return err
}
@@ -159,8 +183,8 @@ func (qi *cachedQCOWImage) cleanOldCaches(ctx context.Context, cl client.Client,
}
}
-func (qi *cachedQCOWImage) createDVForCache(ctx context.Context, cl client.Client, nodePool *hyperv1.NodePool, infraId string) (*v1beta1.DataVolume, error) {
- dv := qi.buildDVForCache(nodePool, infraId)
+func (qi *cachedQCOWImage) createDVForCache(ctx context.Context, cl client.Client, nodePool *hyperv1.NodePool, uid string) (*v1beta1.DataVolume, error) {
+ dv := qi.buildDVForCache(nodePool, uid)
err := cl.Create(ctx, dv)
if err != nil && !errors.IsAlreadyExists(err) {
@@ -179,14 +203,18 @@ func (qi *cachedQCOWImage) getDVSourceForVMTemplate() *v1beta1.DataVolumeSource
}
}
-func (qi *cachedQCOWImage) buildDVForCache(nodePool *hyperv1.NodePool, infraId string) *v1beta1.DataVolume {
+func (qi *cachedQCOWImage) GetCacheName() string {
+ return qi.dvName
+}
+
+func (qi *cachedQCOWImage) buildDVForCache(nodePool *hyperv1.NodePool, uid string) *v1beta1.DataVolume {
dv := &v1beta1.DataVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: bootImageNamePrefix,
Namespace: qi.namespace,
Labels: map[string]string{
bootImageDVLabelRoleName: bootImageDVLabelRoleValue,
- bootImageDVLabelInfraId: infraId,
+ bootImageDVLabelUID: uid,
},
Annotations: map[string]string{
bootImageDVAnnotationHash: qi.hash,
@@ -229,17 +257,17 @@ func (qi *cachedQCOWImage) buildDVForCache(nodePool *hyperv1.NodePool, infraId s
return dv
}
-func getCacheDVSelector(infraId string) client.MatchingLabels {
+func getCacheDVSelector(uid string) client.MatchingLabels {
return map[string]string{
bootImageDVLabelRoleName: bootImageDVLabelRoleValue,
- bootImageDVLabelInfraId: infraId,
+ bootImageDVLabelUID: uid,
}
}
-func getCacheDVs(ctx context.Context, cl client.Client, infraId string, namespace string) ([]v1beta1.DataVolume, error) {
+func getCacheDVs(ctx context.Context, cl client.Client, uid string, namespace string) ([]v1beta1.DataVolume, error) {
dvs := &v1beta1.DataVolumeList{}
- err := cl.List(ctx, dvs, client.InNamespace(namespace), getCacheDVSelector(infraId))
+ err := cl.List(ctx, dvs, client.InNamespace(namespace), getCacheDVSelector(uid))
if err != nil {
return nil, fmt.Errorf("failed to read DataVolumes; %w", err)
@@ -247,3 +275,27 @@ func getCacheDVs(ctx context.Context, cl client.Client, infraId string, namespac
return dvs.Items, nil
}
+
+// DeleteCache deletes the cache DV
+//
+// This function is not part of the interface, because it is called from the nodePool reconciler's Delete() method, which
+// is called before the cacheImage is obtained.
+func DeleteCache(ctx context.Context, cl client.Client, name, namespace string) error {
+ dv := v1beta1.DataVolume{}
+ err := cl.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, &dv)
+
+ if err != nil {
+ if errors.IsNotFound(err) {
+ return nil // already deleted
+ }
+
+ return fmt.Errorf("failed to get DataVolume %s/%s: %w", namespace, name, err)
+ }
+
+ err = cl.Delete(ctx, &dv)
+ if err != nil {
+ return fmt.Errorf("failed to delete DataVolume %s/%s: %w", namespace, name, err)
+ }
+
+ return nil
+}
diff --git a/hypershift-operator/controllers/nodepool/kubevirt/kubevirt_test.go b/hypershift-operator/controllers/nodepool/kubevirt/kubevirt_test.go
index 06428944fbd..522868c6379 100644
--- a/hypershift-operator/controllers/nodepool/kubevirt/kubevirt_test.go
+++ b/hypershift-operator/controllers/nodepool/kubevirt/kubevirt_test.go
@@ -143,7 +143,7 @@ func TestCacheImage(t *testing.T) {
},
Labels: map[string]string{
bootImageDVLabelRoleName: bootImageDVLabelRoleValue,
- bootImageDVLabelInfraId: infraId,
+ bootImageDVLabelUID: infraId,
},
},
},
@@ -163,11 +163,11 @@ func TestCacheImage(t *testing.T) {
Namespace: hostedClusterNamespace,
Annotations: map[string]string{
bootImageDVAnnotationHash: "otherImageHash",
- bootImageDVLabelInfraId: infraId,
+ bootImageDVLabelUID: infraId,
},
Labels: map[string]string{
bootImageDVLabelRoleName: bootImageDVLabelRoleValue,
- bootImageDVLabelInfraId: infraId,
+ bootImageDVLabelUID: infraId,
},
},
},
@@ -187,7 +187,7 @@ func TestCacheImage(t *testing.T) {
Namespace: hostedClusterNamespace,
Annotations: map[string]string{
bootImageDVAnnotationHash: imageHash,
- bootImageDVLabelInfraId: "not-the-same-cluster",
+ bootImageDVLabelUID: "not-the-same-cluster",
},
Labels: map[string]string{
bootImageDVLabelRoleName: bootImageDVLabelRoleValue,
diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller.go b/hypershift-operator/controllers/nodepool/nodepool_controller.go
index ac1aa26b025..824e6333615 100644
--- a/hypershift-operator/controllers/nodepool/nodepool_controller.go
+++ b/hypershift-operator/controllers/nodepool/nodepool_controller.go
@@ -27,6 +27,7 @@ import (
"github.com/openshift/hypershift/hypershift-operator/controllers/nodepool/kubevirt"
"github.com/openshift/hypershift/hypershift-operator/controllers/nodepool/metrics"
ignserver "github.com/openshift/hypershift/ignition-server/controllers"
+ kvinfra "github.com/openshift/hypershift/kubevirtexternalinfra"
"github.com/openshift/hypershift/support/globalconfig"
"github.com/openshift/hypershift/support/releaseinfo"
"github.com/openshift/hypershift/support/supportedversion"
@@ -107,6 +108,7 @@ type NodePoolReconciler struct {
upsert.CreateOrUpdateProvider
HypershiftOperatorImage string
ImageMetadataProvider supportutil.ImageMetadataProvider
+ KubevirtInfraClients kvinfra.KubevirtInfraClientMap
}
type NotReadyError struct {
@@ -178,6 +180,7 @@ func (r *NodePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
return ctrl.Result{}, fmt.Errorf("failed to remove finalizer from nodepool: %w", err)
}
}
+
log.Info("Deleted nodepool", "name", req.NamespacedName)
return ctrl.Result{}, nil
}
@@ -331,7 +334,24 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
}
removeStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolValidMachineConfigConditionType)
- kubevirtBootImage, err = kubevirt.GetImage(nodePool, releaseImage, controlPlaneNamespace)
+ infraNS := controlPlaneNamespace
+ if hcluster.Spec.Platform.Kubevirt != nil &&
+ hcluster.Spec.Platform.Kubevirt.Credentials != nil &&
+ len(hcluster.Spec.Platform.Kubevirt.Credentials.InfraNamespace) > 0 {
+
+ infraNS = hcluster.Spec.Platform.Kubevirt.Credentials.InfraNamespace
+
+ if nodePool.Status.Platform == nil {
+ nodePool.Status.Platform = &hyperv1.NodePoolPlatformStatus{}
+ }
+
+ if nodePool.Status.Platform.KubeVirt == nil {
+ nodePool.Status.Platform.KubeVirt = &hyperv1.KubeVirtNodePoolStatus{}
+ }
+
+ nodePool.Status.Platform.KubeVirt.RemoteNamespace = infraNS
+ }
+ kubevirtBootImage, err = kubevirt.GetImage(nodePool, releaseImage, infraNS)
if err != nil {
SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
Type: hyperv1.NodePoolValidPlatformImageType,
@@ -351,10 +371,17 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
ObservedGeneration: nodePool.Generation,
})
- err = kubevirtBootImage.CacheImage(ctx, r.Client, nodePool, hcluster.Spec.InfraID)
+ uid := string(nodePool.GetUID())
+ kvInfraClient, err := r.KubevirtInfraClients.DiscoverKubevirtClusterClient(ctx, r.Client, uid, hcluster, controlPlaneNamespace)
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to get KubeVirt external infra-cluster: %w", err)
+ }
+ err = kubevirtBootImage.CacheImage(ctx, kvInfraClient, nodePool, uid)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to create or validate KubeVirt image cache: %w", err)
}
+
+ r.addKubeVirtCacheNameToStatus(kubevirtBootImage, nodePool)
}
// Validate IgnitionEndpoint.
@@ -954,6 +981,22 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
return ctrl.Result{}, nil
}
+func (r *NodePoolReconciler) addKubeVirtCacheNameToStatus(kubevirtBootImage kubevirt.BootImage, nodePool *hyperv1.NodePool) {
+ if namer, ok := kubevirtBootImage.(kubevirt.BootImageNamer); ok {
+ if cacheName := namer.GetCacheName(); len(cacheName) > 0 {
+ if nodePool.Status.Platform == nil {
+ nodePool.Status.Platform = &hyperv1.NodePoolPlatformStatus{}
+ }
+
+ if nodePool.Status.Platform.KubeVirt == nil {
+ nodePool.Status.Platform.KubeVirt = &hyperv1.KubeVirtNodePoolStatus{}
+ }
+
+ nodePool.Status.Platform.KubeVirt.CacheName = cacheName
+ }
+ }
+}
+
// createReachedIgnitionEndpointCondition creates a condition for the NodePool based on the tokenSecret data.
func (r NodePoolReconciler) createReachedIgnitionEndpointCondition(ctx context.Context, tokenSecret *corev1.Secret, generation int64) (*hyperv1.NodePoolCondition, error) {
var condition *hyperv1.NodePoolCondition
@@ -1196,6 +1239,26 @@ func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodeP
return fmt.Errorf("failed to delete NodePool secrets: %w", err)
}
+ if nodePool.Status.Platform != nil {
+ if nodePool.Status.Platform.KubeVirt != nil {
+ if cacheName := nodePool.Status.Platform.KubeVirt.CacheName; len(cacheName) > 0 {
+ ns := controlPlaneNamespace
+
+ if len(nodePool.Status.Platform.KubeVirt.RemoteNamespace) > 0 {
+ ns = nodePool.Status.Platform.KubeVirt.RemoteNamespace
+ }
+
+ if cl := r.KubevirtInfraClients.GetClient(string(nodePool.GetUID())); cl != nil {
+ err = kubevirt.DeleteCache(ctx, cl, cacheName, ns)
+ if err != nil {
+ return err
+ }
+ r.KubevirtInfraClients.Delete(string(nodePool.GetUID()))
+ }
+ }
+ }
+ }
+
return nil
}
diff --git a/hypershift-operator/main.go b/hypershift-operator/main.go
index 4a2070ffb2f..2c554e5ae3b 100644
--- a/hypershift-operator/main.go
+++ b/hypershift-operator/main.go
@@ -38,6 +38,7 @@ import (
"github.com/openshift/hypershift/hypershift-operator/controllers/proxy"
"github.com/openshift/hypershift/hypershift-operator/controllers/supportedversion"
"github.com/openshift/hypershift/hypershift-operator/controllers/uwmtelemetry"
+ kvinfra "github.com/openshift/hypershift/kubevirtexternalinfra"
"github.com/openshift/hypershift/pkg/version"
"github.com/openshift/hypershift/support/capabilities"
"github.com/openshift/hypershift/support/metrics"
@@ -271,6 +272,7 @@ func run(ctx context.Context, opts *StartOptions, log logr.Logger) error {
MetricsSet: metricsSet,
OperatorNamespace: opts.Namespace,
SREConfigHash: sreConfigHash,
+ KubevirtInfraClients: kvinfra.NewKubevirtInfraClientMap(),
}
if opts.OIDCStorageProviderS3BucketName != "" {
awsSession := awsutil.NewSession("hypershift-operator-oidc-bucket", opts.OIDCStorageProviderS3Credentials, "", "", opts.OIDCStorageProviderS3Region)
@@ -319,6 +321,7 @@ func run(ctx context.Context, opts *StartOptions, log logr.Logger) error {
CreateOrUpdateProvider: createOrUpdate,
HypershiftOperatorImage: operatorImage,
ImageMetadataProvider: &hyperutil.RegistryClientImageMetadataProvider{},
+ KubevirtInfraClients: kvinfra.NewKubevirtInfraClientMap(),
}).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller: %w", err)
}
diff --git a/kubevirtexternalinfra/externalinfra.go b/kubevirtexternalinfra/externalinfra.go
new file mode 100644
index 00000000000..cedcb029c56
--- /dev/null
+++ b/kubevirtexternalinfra/externalinfra.go
@@ -0,0 +1,117 @@
+package kubevirtexternalinfra
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/clientcmd"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
+)
+
+type KubevirtInfraClientMap interface {
+ DiscoverKubevirtClusterClient(context.Context, client.Client, string, *hyperv1.HostedCluster, string) (*KubevirtInfraClient, error)
+ GetClient(key string) *KubevirtInfraClient
+ Delete(string)
+}
+
+func NewKubevirtInfraClientMap() KubevirtInfraClientMap {
+ return &kubevirtInfraClientMapImp{
+ theMap: sync.Map{},
+ }
+}
+
+type kubevirtInfraClientMapImp struct {
+ theMap sync.Map
+}
+
+type KubevirtInfraClient struct {
+ client.Client
+ Namespace string
+}
+
+func (k *kubevirtInfraClientMapImp) DiscoverKubevirtClusterClient(ctx context.Context, cl client.Client, key string, hc *hyperv1.HostedCluster, localInfraNamespace string) (*KubevirtInfraClient, error) {
+ if k == nil {
+ return nil, nil
+ }
+
+ if hc.Spec.Platform.Kubevirt.Credentials == nil {
+ return &KubevirtInfraClient{
+ Client: cl,
+ Namespace: localInfraNamespace,
+ }, nil
+ }
+ loaded, ok := k.theMap.Load(key)
+ if ok {
+ return loaded.(*KubevirtInfraClient), nil
+ }
+ targetClient, err := generateKubevirtInfraClusterClient(ctx, cl, *hc)
+ if err != nil {
+ return nil, err
+ }
+
+ cluster := &KubevirtInfraClient{
+ Client: targetClient,
+ Namespace: hc.Spec.Platform.Kubevirt.Credentials.InfraNamespace,
+ }
+
+ k.theMap.LoadOrStore(key, cluster)
+ return cluster, nil
+}
+
+func (k *kubevirtInfraClientMapImp) GetClient(key string) *KubevirtInfraClient {
+ if k == nil {
+ return nil
+ }
+ if cl, ok := k.theMap.Load(key); ok {
+ if clnt, ok := cl.(*KubevirtInfraClient); ok {
+ return clnt
+ }
+ }
+ return nil
+}
+
+func (k *kubevirtInfraClientMapImp) Delete(key string) {
+ if k != nil {
+ k.theMap.Delete(key)
+ }
+}
+
+func generateKubevirtInfraClusterClient(ctx context.Context, cpClient client.Client, hc hyperv1.HostedCluster) (client.Client, error) {
+ infraClusterSecretRef := hc.Spec.Platform.Kubevirt.Credentials.InfraKubeConfigSecret
+
+ infraKubeconfigSecret := &corev1.Secret{}
+ secretNamespace := hc.Namespace
+
+ infraKubeconfigSecretKey := client.ObjectKey{Namespace: secretNamespace, Name: infraClusterSecretRef.Name}
+ if err := cpClient.Get(ctx, infraKubeconfigSecretKey, infraKubeconfigSecret); err != nil {
+ return nil, fmt.Errorf("failed to fetch infra kubeconfig secret %s/%s: %w", secretNamespace, infraClusterSecretRef.Name, err)
+ }
+
+ kubeConfig, ok := infraKubeconfigSecret.Data["kubeconfig"]
+ if !ok {
+ return nil, errors.New("failed to retrieve infra kubeconfig from secret: 'kubeconfig' key is missing")
+ }
+
+ clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfig)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create K8s-API client config: %w", err)
+ }
+
+ restConfig, err := clientConfig.ClientConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create REST config: %w", err)
+ }
+ var infraClusterClient client.Client
+
+ infraClusterClient, err = client.New(restConfig, client.Options{Scheme: cpClient.Scheme()})
+ if err != nil {
+ return nil, fmt.Errorf("failed to create infra cluster client: %w", err)
+ }
+
+ return infraClusterClient, nil
+}
From f7c364eadba4378d54d2e614c7931a5496c787a2 Mon Sep 17 00:00:00 2001
From: Nahshon Unna-Tsameret
Date: Thu, 4 May 2023 12:10:25 +0300
Subject: [PATCH 3/4] add e2e test for KubeVirt root image caching
Signed-off-by: Nahshon Unna-Tsameret
---
api/v1alpha1/nodepool_types.go | 2 +-
cmd/cluster/core/dump.go | 13 +++
.../hypershift.openshift.io_nodepools.yaml | 2 +-
hack/app-sre/saas_template.yaml | 2 +-
.../nodepool/kubevirt/imagecacher.go | 16 ++--
.../nodepool/nodepool_controller.go | 14 ++-
support/api/scheme.go | 2 +
test/e2e/nodepool_kv_cache_image_test.go | 96 +++++++++++++++++++
test/e2e/nodepool_machineconfig_test.go | 5 +
test/e2e/nodepool_nto_machineconfig_test.go | 8 +-
test/e2e/nodepool_test.go | 4 +
11 files changed, 149 insertions(+), 15 deletions(-)
create mode 100644 test/e2e/nodepool_kv_cache_image_test.go
diff --git a/api/v1alpha1/nodepool_types.go b/api/v1alpha1/nodepool_types.go
index a55a06d2368..2d090497f3b 100644
--- a/api/v1alpha1/nodepool_types.go
+++ b/api/v1alpha1/nodepool_types.go
@@ -587,7 +587,7 @@ type KubevirtRootVolume struct {
// CacheStrategy defines the boot image caching strategy. Default - no caching
// +optional
- CacheStrategy *KubevirtCachingStrategy `json:"CacheStrategy,omitempty"`
+ CacheStrategy *KubevirtCachingStrategy `json:"cacheStrategy,omitempty"`
}
// KubevirtVolumeType is a specific supported KubeVirt volumes
diff --git a/cmd/cluster/core/dump.go b/cmd/cluster/core/dump.go
index 173995682ff..deb9523f9aa 100644
--- a/cmd/cluster/core/dump.go
+++ b/cmd/cluster/core/dump.go
@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
kubeclient "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
+ cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
capiaws "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -210,6 +211,7 @@ func DumpCluster(ctx context.Context, opts *DumpOptions) error {
&routev1.Route{},
&imagev1.ImageStream{},
&networkingv1.NetworkPolicy{},
+ &cdiv1beta1.DataVolume{},
}
resourceList := strings.Join(resourceTypes(resources), ",")
if opts.AgentNamespace != "" {
@@ -223,6 +225,17 @@ func DumpCluster(ctx context.Context, opts *DumpOptions) error {
cmd.WithNamespace(opts.AgentNamespace).Run(ctx, "agent.agent-install.openshift.io,infraenv.agent-install.openshift.io")
}
+ kubevirtInUse := false
+ for _, np := range nodePools {
+ if np.Spec.Platform.Type == hyperv1.KubevirtPlatform {
+ kubevirtInUse = true
+ break
+ }
+ }
+ if kubevirtInUse {
+ cmd.WithNamespace("openshift-virtualization").Run(ctx, resourceList)
+ }
+
podList := &corev1.PodList{}
if err = c.List(ctx, podList, client.InNamespace(controlPlaneNamespace)); err != nil {
opts.Log.Error(err, "Cannot list pods in controlplane namespace", "namespace", controlPlaneNamespace)
diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
index dd66ab0675a..dc1a20cf0ce 100644
--- a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
+++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
@@ -539,7 +539,7 @@ spec:
description: RootVolume represents values associated with
the VM volume that will host rhcos
properties:
- CacheStrategy:
+ cacheStrategy:
description: CacheStrategy defines the boot image caching
strategy. Default - no caching
properties:
diff --git a/hack/app-sre/saas_template.yaml b/hack/app-sre/saas_template.yaml
index eef1133e6e8..11d50f66ed5 100644
--- a/hack/app-sre/saas_template.yaml
+++ b/hack/app-sre/saas_template.yaml
@@ -42859,7 +42859,7 @@ objects:
description: RootVolume represents values associated with
the VM volume that will host rhcos
properties:
- CacheStrategy:
+ cacheStrategy:
description: CacheStrategy defines the boot image caching
strategy. Default - no caching
properties:
diff --git a/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go b/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
index 8ab8855f445..a46a8daf7d6 100644
--- a/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
+++ b/hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
@@ -151,6 +151,8 @@ func (qi *cachedQCOWImage) CacheImage(ctx context.Context, cl client.Client, nod
}
}
+ qi.cleanOldCaches(ctx, cl, oldDVs)
+
// if no DV with the required hash was found
if len(dvName) == 0 {
logger.Info("couldn't find boot image cache DataVolume; creating it...")
@@ -161,11 +163,7 @@ func (qi *cachedQCOWImage) CacheImage(ctx context.Context, cl client.Client, nod
dvName = dv.Name
}
- if len(dvName) == 0 {
- return fmt.Errorf("can't get the name of the boot image cache DataVolume")
- }
qi.dvName = dvName
- qi.cleanOldCaches(ctx, cl, oldDVs)
return nil
}
@@ -173,7 +171,7 @@ func (qi *cachedQCOWImage) CacheImage(ctx context.Context, cl client.Client, nod
func (qi *cachedQCOWImage) cleanOldCaches(ctx context.Context, cl client.Client, oldDVs []v1beta1.DataVolume) {
logger := ctrl.LoggerFrom(ctx)
for _, oldDV := range oldDVs {
- if oldDV.DeletionTimestamp == nil {
+ if oldDV.DeletionTimestamp.IsZero() {
logger.Info("deleting an old boot image cache DataVolume", "namespace", oldDV.Namespace, "DataVolume name", oldDV.Name)
err := cl.Delete(ctx, &oldDV)
if err != nil {
@@ -292,9 +290,11 @@ func DeleteCache(ctx context.Context, cl client.Client, name, namespace string)
return fmt.Errorf("failed to get DataVolume %s/%s: %w", namespace, name, err)
}
- err = cl.Delete(ctx, &dv)
- if err != nil {
- return fmt.Errorf("failed to delete DataVolume %s/%s: %w", namespace, name, err)
+ if dv.ObjectMeta.DeletionTimestamp.IsZero() {
+ err = cl.Delete(ctx, &dv)
+ if err != nil {
+ return fmt.Errorf("failed to delete DataVolume %s/%s: %w", namespace, name, err)
+ }
}
return nil
diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller.go b/hypershift-operator/controllers/nodepool/nodepool_controller.go
index 824e6333615..30559c36b13 100644
--- a/hypershift-operator/controllers/nodepool/nodepool_controller.go
+++ b/hypershift-operator/controllers/nodepool/nodepool_controller.go
@@ -1235,10 +1235,19 @@ func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodeP
}
// Delete all secrets related to the NodePool
- if err := r.deleteNodePoolSecrets(ctx, nodePool); err != nil {
+ if err = r.deleteNodePoolSecrets(ctx, nodePool); err != nil {
return fmt.Errorf("failed to delete NodePool secrets: %w", err)
}
+ err = r.deleteKubeVirtCache(ctx, nodePool, controlPlaneNamespace)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *NodePoolReconciler) deleteKubeVirtCache(ctx context.Context, nodePool *hyperv1.NodePool, controlPlaneNamespace string) error {
if nodePool.Status.Platform != nil {
if nodePool.Status.Platform.KubeVirt != nil {
if cacheName := nodePool.Status.Platform.KubeVirt.CacheName; len(cacheName) > 0 {
@@ -1249,7 +1258,7 @@ func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodeP
}
if cl := r.KubevirtInfraClients.GetClient(string(nodePool.GetUID())); cl != nil {
- err = kubevirt.DeleteCache(ctx, cl, cacheName, ns)
+ err := kubevirt.DeleteCache(ctx, cl, cacheName, ns)
if err != nil {
return err
}
@@ -1258,7 +1267,6 @@ func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodeP
}
}
}
-
return nil
}
diff --git a/support/api/scheme.go b/support/api/scheme.go
index 47e52ca2877..c44839af230 100644
--- a/support/api/scheme.go
+++ b/support/api/scheme.go
@@ -27,6 +27,7 @@ import (
apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
+ cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
capiaws "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
capiibm "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta1"
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -79,4 +80,5 @@ func init() {
prometheusoperatorv1.AddToScheme(Scheme)
}
mcfgv1.AddToScheme(Scheme)
+ cdiv1beta1.AddToScheme(Scheme)
}
diff --git a/test/e2e/nodepool_kv_cache_image_test.go b/test/e2e/nodepool_kv_cache_image_test.go
new file mode 100644
index 00000000000..fa2ad606064
--- /dev/null
+++ b/test/e2e/nodepool_kv_cache_image_test.go
@@ -0,0 +1,96 @@
+//go:build e2e
+// +build e2e
+
+package e2e
+
+import (
+ "context"
+ "github.com/openshift/hypershift/hypershift-operator/controllers/manifests"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/pointer"
+ "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
+ "sigs.k8s.io/cluster-api/util"
+ "testing"
+ "time"
+
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ crclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
+ kvinfra "github.com/openshift/hypershift/kubevirtexternalinfra"
+)
+
+type KubeVirtCacheTest struct {
+ ctx context.Context
+ client crclient.Client
+ hostedCluster *hyperv1.HostedCluster
+}
+
+func NewKubeVirtCacheTest(ctx context.Context, cl crclient.Client, hc *hyperv1.HostedCluster) *KubeVirtCacheTest {
+ return &KubeVirtCacheTest{
+ ctx: ctx,
+ client: cl,
+ hostedCluster: hc,
+ }
+}
+
+func (k KubeVirtCacheTest) Setup(t *testing.T) {
+ if globalOpts.Platform != hyperv1.KubevirtPlatform {
+ t.Skip("test only supported on platform KubeVirt")
+ }
+
+ t.Log("Starting test KubeVirtCacheTest")
+}
+
+func (k KubeVirtCacheTest) Run(t *testing.T, nodePool hyperv1.NodePool, nodes []corev1.Node) {
+ g := NewWithT(t)
+
+ np := &hyperv1.NodePool{}
+ g.Eventually(func(gg Gomega) {
+ gg.Expect(k.client.Get(k.ctx, util.ObjectKey(&nodePool), np)).Should(Succeed())
+ gg.Expect(np.Status.Platform).ToNot(BeNil())
+ gg.Expect(np.Status.Platform.KubeVirt).ToNot(BeNil())
+ gg.Expect(np.Status.Platform.KubeVirt.CacheName).ToNot(BeEmpty(), "cache DataVolume name should be populated")
+ }).Within(5 * time.Minute).WithPolling(time.Second).Should(Succeed())
+
+ localInfraNS := manifests.HostedControlPlaneNamespace(k.hostedCluster.Namespace, k.hostedCluster.Name).Name
+ var guestNamespace string
+ if len(np.Status.Platform.KubeVirt.RemoteNamespace) != 0 {
+ guestNamespace = np.Status.Platform.KubeVirt.RemoteNamespace
+ } else {
+ guestNamespace = localInfraNS
+ }
+
+ cm := kvinfra.NewKubevirtInfraClientMap()
+ infraClient, err := cm.DiscoverKubevirtClusterClient(k.ctx, k.client, k.hostedCluster.Spec.InfraID, k.hostedCluster, localInfraNS)
+ g.Expect(err).ShouldNot(HaveOccurred())
+
+ dv := &v1beta1.DataVolume{}
+ g.Expect(
+ infraClient.Get(k.ctx, crclient.ObjectKey{Namespace: guestNamespace, Name: np.Status.Platform.KubeVirt.CacheName}, dv),
+ ).To(Succeed())
+
+ g.Expect(dv.Status.Phase).Should(Equal(v1beta1.Succeeded))
+}
+
+func (k KubeVirtCacheTest) BuildNodePoolManifest(defaultNodepool hyperv1.NodePool) (*hyperv1.NodePool, error) {
+ nodePool := &hyperv1.NodePool{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: k.hostedCluster.Name + "-" + "test-kv-cache-root-volume",
+ Namespace: k.hostedCluster.Namespace,
+ },
+ }
+ defaultNodepool.Spec.DeepCopyInto(&nodePool.Spec)
+
+ if nodePool.Spec.Platform.Kubevirt != nil &&
+ nodePool.Spec.Platform.Kubevirt.RootVolume != nil {
+ nodePool.Spec.Platform.Kubevirt.RootVolume.CacheStrategy = &hyperv1.KubevirtCachingStrategy{
+ Type: hyperv1.KubevirtCachingStrategyPVC,
+ }
+ }
+
+ nodePool.Spec.Replicas = pointer.Int32(2)
+
+ return nodePool, nil
+}
diff --git a/test/e2e/nodepool_machineconfig_test.go b/test/e2e/nodepool_machineconfig_test.go
index bb2a505dd96..1d527040a7a 100644
--- a/test/e2e/nodepool_machineconfig_test.go
+++ b/test/e2e/nodepool_machineconfig_test.go
@@ -52,6 +52,11 @@ func NewNodePoolMachineconfigRolloutTest(ctx context.Context, mgmtClient crclien
}
func (mc *NodePoolMachineconfigRolloutTest) Setup(t *testing.T) {
+ if globalOpts.Platform == hyperv1.KubevirtPlatform {
+ t.Skip("test can't run for the platform KubeVirt")
+ }
+
+ t.Log("Starting test NodePoolMachineconfigRolloutTest")
}
func (mc *NodePoolMachineconfigRolloutTest) BuildNodePoolManifest(defaultNodepool hyperv1.NodePool) (*hyperv1.NodePool, error) {
diff --git a/test/e2e/nodepool_nto_machineconfig_test.go b/test/e2e/nodepool_nto_machineconfig_test.go
index 744f4ff6ae4..ed0af13bf8d 100644
--- a/test/e2e/nodepool_nto_machineconfig_test.go
+++ b/test/e2e/nodepool_nto_machineconfig_test.go
@@ -66,7 +66,13 @@ func NewNTOMachineConfigRolloutTest(ctx context.Context, mgmtClient crclient.Cli
}
}
-func (mc *NTOMachineConfigRolloutTest) Setup(t *testing.T) {}
+func (mc *NTOMachineConfigRolloutTest) Setup(t *testing.T) {
+ if globalOpts.Platform == hyperv1.KubevirtPlatform {
+ t.Skip("test can't run for the platform KubeVirt")
+ }
+
+ t.Log("Starting test NTOMachineConfigRolloutTest")
+}
func (mc *NTOMachineConfigRolloutTest) BuildNodePoolManifest(defaultNodepool hyperv1.NodePool) (*hyperv1.NodePool, error) {
nodePool := &hyperv1.NodePool{
diff --git a/test/e2e/nodepool_test.go b/test/e2e/nodepool_test.go
index 5e71d9f59d1..9a0ac023b51 100644
--- a/test/e2e/nodepool_test.go
+++ b/test/e2e/nodepool_test.go
@@ -101,6 +101,10 @@ func TestNodePool(t *testing.T) {
manifestBuilder: NewNodePoolInPlaceUpgradeTestManifest(hostedCluster, globalOpts.PreviousReleaseImage, globalOpts.LatestReleaseImage),
},
*/
+ {
+ name: "KubeVirtCacheTest",
+ test: NewKubeVirtCacheTest(ctx, mgmtClient, hostedCluster),
+ },
}
t.Run("NodePool Tests Group", func(t *testing.T) {
From 23aa9690768aa75282717e9912afe571392aca1d Mon Sep 17 00:00:00 2001
From: Nahshon Unna-Tsameret
Date: Thu, 11 May 2023 10:18:19 +0300
Subject: [PATCH 4/4] skip AWS-specific check for non-AWS platforms
Signed-off-by: Nahshon Unna-Tsameret
---
test/e2e/nodepool_test.go | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/test/e2e/nodepool_test.go b/test/e2e/nodepool_test.go
index 9a0ac023b51..95b23a51e81 100644
--- a/test/e2e/nodepool_test.go
+++ b/test/e2e/nodepool_test.go
@@ -209,11 +209,13 @@ func executeNodePoolTest(t *testing.T, ctx context.Context, mgmtClient crclient.
// run test validations
nodePoolTest.Run(t, *nodePool, nodes)
- // validate default security group
err = mgmtClient.Get(ctx, crclient.ObjectKeyFromObject(nodePool), nodePool)
g.Expect(err).NotTo(HaveOccurred(), "failed to get nodePool")
- nodePoolCondition := nodepool.FindStatusCondition(nodePool.Status.Conditions, hyperv1.NodePoolAWSSecurityGroupAvailableConditionType)
- g.Expect(nodePoolCondition).ToNot(BeNil(), "%s condition not found", hyperv1.NodePoolAWSSecurityGroupAvailableConditionType)
- g.Expect(nodePoolCondition.Status).To(Equal(corev1.ConditionTrue), "condition %s is not True", hyperv1.NodePoolAWSSecurityGroupAvailableConditionType)
+ if hostedCluster.Spec.Platform.Type == hyperv1.AWSPlatform {
+ // validate default security group
+ nodePoolCondition := nodepool.FindStatusCondition(nodePool.Status.Conditions, hyperv1.NodePoolAWSSecurityGroupAvailableConditionType)
+ g.Expect(nodePoolCondition).ToNot(BeNil(), "%s condition not found", hyperv1.NodePoolAWSSecurityGroupAvailableConditionType)
+ g.Expect(nodePoolCondition.Status).To(Equal(corev1.ConditionTrue), "condition %s is not True", hyperv1.NodePoolAWSSecurityGroupAvailableConditionType)
+ }
}
|