package metrics

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/sirupsen/logrus"

	corev1 "k8s.io/api/core/v1"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	autoscalingv1beta1 "github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1beta1"
)

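// MachinesEventType distinguishes the pod lifecycle transitions that trigger
// a machine-capacity snapshot.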
type MachinesEventType string

const (
	PodCreation   MachinesEventType = "pod_creation"
	PodCompletion MachinesEventType = "pod_completion"
)

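// MachineInfo captures the name and current lifecycle phase of a single
// Machine.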
type MachineInfo struct {
	Name  string `json:"name"`
	Phase string `json:"phase"`
}

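// AutoscalerInfo records the replica bounds of the MachineAutoscaler that
// targets a MachineSet.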
type AutoscalerInfo struct {
	Name string `json:"name"`
	Min  int    `json:"min"`
	Max  int    `json:"max"`
}

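// MachineSetCount is the per-MachineSet slice of a snapshot: the current
// replica count, the autoscaler bounds (if any), and the Machines backing
// the set.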
type MachineSetCount struct {
	Name       string          `json:"name"`
	Current    int             `json:"current"`
	Autoscaler *AutoscalerInfo `json:"autoscaler,omitempty"`
	Machines   []MachineInfo   `json:"machines,omitempty"`
}

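// WorkloadNodeCount aggregates machine capacity across every MachineSet
// labeled for a given CI workload.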
type WorkloadNodeCount struct {
	Workload      string            `json:"workload"`
	TotalMachines int               `json:"total_machines"`
	MachineSets   []MachineSetCount `json:"machine_sets"`
}

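// MachinesEvent is the event this plugin records: a capacity snapshot taken
// when a pod is created or completes.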
type MachinesEvent struct {
	Type             MachinesEventType `json:"type"`
	PodName          string            `json:"pod_name"`
	Namespace        string            `json:"namespace"`
	Workload         string            `json:"workload"`
	WorkloadCapacity WorkloadNodeCount `json:"workload_capacity"`
	Timestamp        time.Time         `json:"timestamp"`
}

// SetTimestamp records when the event was observed.
func (e *MachinesEvent) SetTimestamp(ts time.Time) {
	e.Timestamp = ts
}

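// MachinesPlugin records machine-capacity snapshots for CI workloads. The
// mutex guards events against concurrent Record and Events calls.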
type MachinesPlugin struct {
	ctx         context.Context
	logger      *logrus.Entry
	mu          sync.Mutex
	events      []MachinesEvent
	client      ctrlruntimeclient.Client
	autoscalers []autoscalingv1beta1.MachineAutoscaler
}

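// NewMachinesPlugin returns a plugin scoped to the given client and the
// MachineAutoscalers discovered at startup. A minimal wiring sketch (the
// registry shown is hypothetical, not part of this package):
//
//	plugin := NewMachinesPlugin(ctx, logger, client, autoscalers)
//	registry.Register(plugin) // hypothetical registry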
func NewMachinesPlugin(ctx context.Context, logger *logrus.Entry, client ctrlruntimeclient.Client, autoscalers []autoscalingv1beta1.MachineAutoscaler) *MachinesPlugin {
	return &MachinesPlugin{
		ctx:         ctx,
		logger:      logger.WithField("plugin", "machines"),
		client:      client,
		autoscalers: autoscalers,
	}
}

// Name returns the plugin's identifier.
func (p *MachinesPlugin) Name() string {
	return "machines"
}

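// Record captures a machines event. Pod-creation events can arrive before
// the pod carries the workload label, so Record may block (for up to a
// minute) waiting for the label before taking the capacity snapshot.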
func (p *MachinesPlugin) Record(ev MetricsEvent) {
	e, ok := ev.(*MachinesEvent)
	if !ok {
		return
	}

	if e.Type == PodCreation && e.Workload == "" {
		workload, err := p.waitForWorkloadLabel(e.Namespace, e.PodName)
		if err != nil {
			p.logger.WithError(err).Warnf("Failed to get workload label for pod %s/%s", e.Namespace, e.PodName)
			return
		}
		e.Workload = workload
	}

	e.WorkloadCapacity = p.getWorkloadCounts(e.Workload)

	p.mu.Lock()
	defer p.mu.Unlock()
	p.logger.WithField("event", e).Debug("Recorded machines event")
	p.events = append(p.events, *e)
}

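// waitForWorkloadLabel polls the pod once per second, for up to a minute,
// until the CIWorkloadLabel label shows up.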
func (p *MachinesPlugin) waitForWorkloadLabel(namespace, podName string) (string, error) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	timeout := time.After(time.Minute)
	for {
		select {
		case <-p.ctx.Done():
			return "", fmt.Errorf("context cancelled: %w", p.ctx.Err())
		case <-timeout:
			return "", fmt.Errorf("timed out waiting for %s label on pod %s/%s", CIWorkloadLabel, namespace, podName)
		case <-ticker.C:
			pod := &corev1.Pod{}
			if err := p.client.Get(p.ctx, ctrlruntimeclient.ObjectKey{Namespace: namespace, Name: podName}, pod); err != nil {
				p.logger.WithError(err).Debugf("Failed to get pod %s/%s while waiting for workload label", namespace, podName)
				continue
			}

			workload := pod.Labels[CIWorkloadLabel]
			if workload != "" {
				return workload, nil
			}
		}
	}
}

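// Events returns the recorded events. The returned values point into the
// plugin's backing slice, so callers should treat them as read-only.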
func (p *MachinesPlugin) Events() []MetricsEvent {
	p.mu.Lock()
	defer p.mu.Unlock()
	out := make([]MetricsEvent, len(p.events))
	for i := range p.events {
		out[i] = &p.events[i]
	}
	return out
}

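// getAutoscaler returns the replica bounds of the MachineAutoscaler that
// targets the named MachineSet, or nil if none does. Only the target name is
// matched; the ScaleTargetRef kind is not checked.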
func (p *MachinesPlugin) getAutoscaler(machineSetName string) *AutoscalerInfo {
	for _, autoscaler := range p.autoscalers {
		if autoscaler.Spec.ScaleTargetRef.Name == machineSetName {
			return &AutoscalerInfo{
				Name: autoscaler.Name,
				Min:  int(autoscaler.Spec.MinReplicas),
				Max:  int(autoscaler.Spec.MaxReplicas),
			}
		}
	}
	return nil
}

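// getWorkloadCounts builds the capacity snapshot for a workload: every
// MachineSet whose machine template carries a matching CIWorkloadLabel, the
// autoscaler bounds for each, and the phase of every backing Machine.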
func (p *MachinesPlugin) getWorkloadCounts(workload string) WorkloadNodeCount {
	ret := WorkloadNodeCount{Workload: workload}
	machineSetList := &machinev1beta1.MachineSetList{}
	if err := p.client.List(p.ctx, machineSetList, ctrlruntimeclient.InNamespace(MachineAPINamespace)); err != nil {
		p.logger.WithError(err).Warn("Failed to list MachineSets")
		// Return what we already know (the workload name) rather than an empty snapshot.
		return ret
	}

	for _, ms := range machineSetList.Items {
		msWorkload := ms.Spec.Template.Spec.ObjectMeta.Labels[CIWorkloadLabel]
		if msWorkload != workload {
			continue
		}

		current := int(ms.Status.Replicas)
		autoscaler := p.getAutoscaler(ms.Name)

		machineList := &machinev1beta1.MachineList{}
		if err := p.client.List(p.ctx, machineList,
			ctrlruntimeclient.InNamespace(MachineAPINamespace),
			ctrlruntimeclient.MatchingLabels{MachineSetLabel: ms.Name}); err != nil {
			p.logger.WithError(err).Warnf("Failed to list Machines for MachineSet %s", ms.Name)
			continue
		}

		var machines []MachineInfo
		for _, machine := range machineList.Items {
			phase := "Unknown"
			if machine.Status.Phase != nil {
				phase = *machine.Status.Phase
			}
			machines = append(machines, MachineInfo{
				Name:  machine.Name,
				Phase: phase,
			})
		}

		ret.TotalMachines += current
		ret.MachineSets = append(ret.MachineSets, MachineSetCount{
			Name:       ms.Name,
			Current:    current,
			Autoscaler: autoscaler,
			Machines:   machines,
		})
	}

	return ret
}