Skip to content

Commit b60efdb

Browse files
committed
Use the Hypervisor CRD instead of labels.
When there is no maintenance-controller profile, the GardenerLifecycleController will now set the Hypervisor CRD maintenance field. For confirmation, it will look at the conditions. That renders the labels obsolete.
1 parent 51e4cc5 commit b60efdb

File tree

6 files changed

+101
-398
lines changed

6 files changed

+101
-398
lines changed

cmd/main.go

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -243,14 +243,6 @@ func main() {
243243
os.Exit(1)
244244
}
245245

246-
if err = (&controller.NodeEvictionLabelReconciler{
247-
Client: mgr.GetClient(),
248-
Scheme: mgr.GetScheme(),
249-
}).SetupWithManager(mgr); err != nil {
250-
setupLog.Error(err, "unable to create controller", "controller", "Node")
251-
os.Exit(1)
252-
}
253-
254246
if err = (&controller.NodeDecommissionReconciler{
255247
Client: mgr.GetClient(),
256248
Scheme: mgr.GetScheme(),

internal/controller/gardener_node_lifecycle_controller.go

Lines changed: 19 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@ import (
3232
corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
3333
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
3434
policyv1ac "k8s.io/client-go/applyconfigurations/policy/v1"
35-
"k8s.io/client-go/util/retry"
3635
ctrl "sigs.k8s.io/controller-runtime"
3736
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
3837
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
@@ -72,40 +71,40 @@ func (r *GardenerNodeLifecycleController) Reconcile(ctx context.Context, req ctr
7271
return ctrl.Result{}, k8sclient.IgnoreNotFound(err)
7372
}
7473

75-
hv := kvmv1.Hypervisor{}
76-
if err := r.Get(ctx, k8sclient.ObjectKey{Name: req.Name}, &hv); k8sclient.IgnoreNotFound(err) != nil {
74+
hv := &kvmv1.Hypervisor{}
75+
if err := r.Get(ctx, k8sclient.ObjectKey{Name: req.Name}, hv); k8sclient.IgnoreNotFound(err) != nil {
7776
return ctrl.Result{}, err
7877
}
78+
7979
if !hv.Spec.LifecycleEnabled {
8080
// Nothing to be done
8181
return ctrl.Result{}, nil
8282
}
8383

84-
if isTerminating(node) {
85-
changed, err := setNodeLabels(ctx, r.Client, node, map[string]string{labelEvictionRequired: valueReasonTerminating})
86-
if changed || err != nil {
87-
return ctrl.Result{}, err
88-
}
89-
}
90-
9184
// We do not care about the particular value, as long as it isn't an error
9285
var minAvailable int32 = 1
93-
evictionValue, found := node.Labels[labelEvictionApproved]
94-
if found && evictionValue != "false" {
86+
87+
// Onboarding is not in progress anymore, i.e. the host is onboarded
88+
onboardingCompleted := meta.IsStatusConditionFalse(hv.Status.Conditions, kvmv1.ConditionTypeOnboarding)
89+
// Evicting is not in progress anymore, i.e. the host is empty
90+
evictionComplete := meta.IsStatusConditionFalse(hv.Status.Conditions, kvmv1.ConditionTypeEvicting)
91+
92+
if evictionComplete {
9593
minAvailable = 0
94+
95+
if onboardingCompleted && isTerminating(node) {
96+
// Onboarded & terminating & eviction complete -> disable HA
97+
if err := disableInstanceHA(hv); err != nil {
98+
return ctrl.Result{}, err
99+
}
100+
}
96101
}
97102

98-
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
99-
return r.ensureBlockingPodDisruptionBudget(ctx, node, minAvailable)
100-
}); err != nil {
103+
if err := r.ensureBlockingPodDisruptionBudget(ctx, node, minAvailable); err != nil {
101104
return ctrl.Result{}, err
102105
}
103106

104-
onboardingCompleted := meta.IsStatusConditionFalse(hv.Status.Conditions, kvmv1.ConditionTypeOnboarding)
105-
106-
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
107-
return r.ensureSignallingDeployment(ctx, node, minAvailable, onboardingCompleted)
108-
}); err != nil {
107+
if err := r.ensureSignallingDeployment(ctx, node, minAvailable, onboardingCompleted); err != nil {
109108
return ctrl.Result{}, err
110109
}
111110

internal/controller/gardener_node_lifecycle_controller_test.go

Lines changed: 82 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -18,51 +18,115 @@ limitations under the License.
1818
package controller
1919

2020
import (
21+
"fmt"
22+
2123
. "github.com/onsi/ginkgo/v2"
2224
. "github.com/onsi/gomega"
25+
appsv1 "k8s.io/api/apps/v1"
2326
corev1 "k8s.io/api/core/v1"
27+
policyv1 "k8s.io/api/policy/v1"
28+
"k8s.io/apimachinery/pkg/api/meta"
2429
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2530
"k8s.io/apimachinery/pkg/types"
2631
ctrl "sigs.k8s.io/controller-runtime"
27-
"sigs.k8s.io/controller-runtime/pkg/client"
32+
33+
kvmv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
2834
)
2935

3036
var _ = Describe("Gardener Maintenance Controller", func() {
3137
const nodeName = "node-test"
32-
var controller *GardenerNodeLifecycleController
38+
var (
39+
controller *GardenerNodeLifecycleController
40+
name = types.NamespacedName{Name: nodeName}
41+
reconcileReq = ctrl.Request{NamespacedName: name}
42+
maintenanceName = types.NamespacedName{Name: fmt.Sprintf("maint-%v", nodeName), Namespace: "kube-system"}
43+
)
3344

3445
BeforeEach(func(ctx SpecContext) {
3546
controller = &GardenerNodeLifecycleController{
3647
Client: k8sClient,
3748
Scheme: k8sClient.Scheme(),
3849
}
3950

40-
By("creating the namespace for the reconciler")
41-
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "monsoon3"}}
42-
Expect(client.IgnoreAlreadyExists(k8sClient.Create(ctx, ns))).To(Succeed())
43-
4451
By("creating the core resource for the Kind Node")
45-
resource := &corev1.Node{
52+
node := &corev1.Node{
53+
ObjectMeta: metav1.ObjectMeta{
54+
Name: nodeName,
55+
},
56+
}
57+
Expect(k8sClient.Create(ctx, node)).To(Succeed())
58+
DeferCleanup(func(ctx SpecContext) {
59+
By("Cleanup the specific node")
60+
Expect(k8sClient.Delete(ctx, node)).To(Succeed())
61+
})
62+
63+
By("creating the core resource for the Kind hypervisor")
64+
hypervisor := &kvmv1.Hypervisor{
4665
ObjectMeta: metav1.ObjectMeta{
47-
Name: nodeName,
48-
Labels: map[string]string{labelEvictionRequired: "true"},
66+
Name: nodeName,
67+
},
68+
Spec: kvmv1.HypervisorSpec{
69+
LifecycleEnabled: true,
4970
},
5071
}
51-
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
72+
Expect(k8sClient.Create(ctx, hypervisor)).To(Succeed())
5273
DeferCleanup(func(ctx SpecContext) {
53-
Expect(client.IgnoreNotFound(k8sClient.Delete(ctx, resource))).To(Succeed())
74+
Expect(k8sClient.Delete(ctx, hypervisor)).To(Succeed())
5475
})
5576
})
5677

5778
Context("When reconciling a node", func() {
58-
It("should successfully reconcile the resource", func(ctx SpecContext) {
59-
req := ctrl.Request{
60-
NamespacedName: types.NamespacedName{Name: nodeName},
61-
}
62-
63-
By("Reconciling the created resource")
64-
_, err := controller.Reconcile(ctx, req)
79+
JustBeforeEach(func(ctx SpecContext) {
80+
_, err := controller.Reconcile(ctx, reconcileReq)
6581
Expect(err).NotTo(HaveOccurred())
6682
})
83+
It("should create a poddisruptionbudget", func(ctx SpecContext) {
84+
pdb := &policyv1.PodDisruptionBudget{}
85+
Expect(k8sClient.Get(ctx, maintenanceName, pdb)).To(Succeed())
86+
Expect(pdb.Spec.MinAvailable).To(HaveField("IntVal", BeNumerically("==", 1)))
87+
})
88+
89+
It("should create a failing deployment to signal onboarding not being completed", func(ctx SpecContext) {
90+
dep := &appsv1.Deployment{}
91+
Expect(k8sClient.Get(ctx, maintenanceName, dep)).To(Succeed())
92+
Expect(dep.Spec.Template.Spec.Containers).To(HaveLen(1))
93+
Expect(dep.Spec.Template.Spec.Containers[0].StartupProbe.Exec.Command).To(Equal([]string{"/bin/false"}))
94+
})
95+
96+
When("the node has been onboarded", func() {
97+
BeforeEach(func(ctx SpecContext) {
98+
hypervisor := &kvmv1.Hypervisor{}
99+
Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed())
100+
meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{
101+
Type: kvmv1.ConditionTypeOnboarding,
102+
Status: metav1.ConditionFalse,
103+
Reason: "dontcare",
104+
Message: "dontcare",
105+
})
106+
Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed())
107+
})
108+
109+
It("should create a deployment with onboarding completed", func(ctx SpecContext) {
110+
dep := &appsv1.Deployment{}
111+
Expect(k8sClient.Get(ctx, maintenanceName, dep)).To(Succeed())
112+
Expect(dep.Spec.Template.Spec.Containers).To(HaveLen(1))
113+
Expect(dep.Spec.Template.Spec.Containers[0].StartupProbe.Exec.Command).To(Equal([]string{"/bin/true"}))
114+
})
115+
})
116+
117+
When("the node has been evicted", func() {
118+
BeforeEach(func(ctx SpecContext) {
119+
hypervisor := &kvmv1.Hypervisor{}
120+
Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed())
121+
meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{
122+
Type: kvmv1.ConditionTypeEvicting,
123+
Status: metav1.ConditionFalse,
124+
Reason: "dontcare",
125+
Message: "dontcare",
126+
})
127+
Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed())
128+
})
129+
})
130+
67131
})
68132
})

0 commit comments

Comments
 (0)