# monitoring.yaml
---
# All hosts should have the node exporter installed.
# This installs the official role on bare-metal hosts and VMs.
# Do not run this exporter on images; the images group is excluded below.
- name: Install node exporter
hosts: all:!images
roles:
- role: prometheus.prometheus.node_exporter
vars:
node_exporter_enabled_collectors:
- systemd
- textfile:
directory: /var/lib/node_exporter/textfile_collector
- processes
- filesystem:
ignored-fs-types: "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs|fuse\\.quobyte|nfs|beegfs)$"
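# A quick sanity check once the node exporter play has run (assuming the default port 9100):
#   curl -s http://<host>:9100/metrics | grep node_exporter_build_info
# Any *.prom files dropped into /var/lib/node_exporter/textfile_collector will be picked up
# by the textfile collector enabled above.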
# This section will install most services on the monitoring server
- name: Main monitoring services install
hosts: monitoring
vars:
###########################################################
## All configuration changes should go here ###############
###########################################################
    # The value should come from an encrypted vault.
    # The key itself is generated by an Opsgenie team admin with the Prometheus integration.
# opsgenie_api_key: "{{ vault_opsgenie_api_key }}"
opsgenie_api_key: "somethingspecial"
# Name of the opsgenie team configured
opsgenie_team: "UTHSCSA"
    # An Opsgenie heartbeat should be created; this is its unique name
opsgenie_heartbeat: "somethingnew"
    # List of alertmanager peers for HA setup
    # Should be a list of internal IPs; for dev a single entry is fine
alertmanager_peers:
- localhost:9094
# Prometheus targets. Should be one list per scrape job
# Storage servers running the node exporter
node_storage_targets:
- 127.0.0.1:9100
# Service nodes (headnode, login) running the node exporter
node_service_targets:
- 127.0.0.1:9100
# Compute nodes running the node exporter
node_compute_targets:
- 127.0.0.1:9100
# Compute nodes running the DCGM Exporter
dcgm_targets:
- 127.0.0.1:9400
    # Blackbox ICMP (ping) targets. Just a list of IP addresses or hostnames
blackbox_icmp_targets:
- 127.0.0.1
- localhost
- 8.8.8.8
- 192.168.1.1
    # Blackbox SSH targets. Just a list of hosts; a host may be listed more than once
blackbox_ssh_targets:
- localhost
    # Blackbox DNS. List of DNS servers to query; per the dns_test module configured later, each will be asked to resolve blackbox_dns_query
blackbox_dns_targets:
- 8.8.8.8
- 8.8.4.4
# slurm exporter targets
slurm_targets:
- localhost:9200
# infiniband exporter targets
infiniband_targets:
- localhost:9683
# ZFS Exporter targets.
zfs_targets:
- localhost:9134
# Hostname to query for blackbox dns checks
blackbox_dns_query: "www.google.com"
# Grafana admin default password
# grafana_password: "{{ vault_grafana_password }}"
grafana_password: "admin"
# WARNING: This should not be named grafana_dashboards because it will cause the role to fail prematurely
# List of grafana dashboards
grafana_dashboard_list:
# Node exporter
- dashboard_id: 1860
revision_id: 33
datasource: prometheus
# Alertmanager
- dashboard_id: 9578
revision_id: 4
datasource: prometheus
#zfs exporter
- dashboard_id: 12586
revision_id: 1
datasource: prometheus
# Infiniband detailed
- dashboard_id: 14992
revision_id: 2
datasource: prometheus
# Infiniband overview
- dashboard_id: 14991
revision_id: 2
datasource: prometheus
# Slurm exporter
- dashboard_id: 4323
revision_id: 3
datasource: prometheus
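    # The dashboard_id/revision_id pairs above refer to dashboards published on grafana.com
    # (grafana.com/grafana/dashboards/<id>); pinning a revision keeps imports reproducible.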
    # If required, set the prometheus exporter user id. The default is 440, which may conflict with an existing UID
prometheus_exporter_uid: 460
roles:
# Install and configure the prometheus alertmanager
- role: prometheus.prometheus.alertmanager
vars:
alertmanager_cluster:
peers: "{{ alertmanager_peers }}"
listen-address: 0.0.0.0:9094
alertmanager_opsgenie_api_key: "{{ opsgenie_api_key }}"
alertmanager_receivers:
- name: 'default-receiver'
- name: opsgenie
opsgenie_configs:
- priority: '{% raw %}{{ if .CommonAnnotations.priority }}{{ .CommonAnnotations.priority}}{{ else }}P3{{ end }}{% endraw %}'
description: '{% raw %}{{ .CommonAnnotations.description }}{% endraw %}'
responders:
- name: "{{ opsgenie_team }}"
type: "team"
- name: heartbeat
webhook_configs:
- send_resolved: true
url: "https://api.opsgenie.com/v2/heartbeats/{{ opsgenie_heartbeat }}/ping"
http_config:
basic_auth:
password: "{{ opsgenie_api_key }}"
alertmanager_route:
receiver: 'default-receiver'
routes:
- match:
alertname: Watchdog
repeat_interval: 60s
receiver: heartbeat
- receiver: opsgenie
group_by: [...]
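        # Note on the routing above: the Watchdog route turns the conventional always-firing
        # Watchdog alert (assuming the rule set downloaded later defines one) into a 60s
        # Opsgenie heartbeat ping, and `group_by: [...]` is Alertmanager's special "group by
        # all labels" value, so each distinct alert reaches Opsgenie on its own rather than batched.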
# Install and configure the main prometheus server and database
- role: prometheus.prometheus.prometheus
vars:
prometheus_alertmanager_config:
- scheme: http
static_configs:
        # May need tweaking if alertmanager is not running on the same host
        # as prometheus; otherwise the default is fine
- targets: ["127.0.0.1:9093"]
prometheus_targets:
node_storage:
- targets: "{{ node_storage_targets }}"
labels:
env: prod
node_service:
- targets: "{{ node_service_targets }}"
labels:
env: prod
node_compute:
- targets: "{{ node_compute_targets }}"
labels:
env: prod
dcgm:
- targets: "{{ dcgm_targets }}"
labels:
env: prod
blackbox_icmp:
- targets: "{{ blackbox_icmp_targets }}"
labels:
env: prod
blackbox_ssh:
- targets: "{{ blackbox_ssh_targets }}"
labels:
env: prod
blackbox_dns:
- targets: "{{ blackbox_dns_targets }}"
labels:
env: prod
slurm:
- targets: "{{ slurm_targets }}"
labels:
env: prod
infiniband:
- targets: "{{ infiniband_targets }}"
labels:
env: prod
zfs:
- targets: "{{ zfs_targets }}"
labels:
env: prod
prometheus_scrape_configs:
- job_name: "prometheus"
metrics_path: "{{ prometheus_metrics_path }}"
static_configs:
- targets:
- "localhost:9090"
- job_name: "node_service"
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/node_service.yml"
- job_name: "node_compute"
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/node_compute.yml"
- job_name: "node_storage"
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/node_storage.yml"
- job_name: "smartctl"
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/smartctl.yml"
- job_name: "zfs"
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/zfs.yml"
- job_name: "infiniband"
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/infiniband.yml"
- job_name: "dcgm"
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/dcgm.yml"
- job_name: "blackbox_icmp"
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/blackbox_icmp.yml"
params:
module: [icmp]
metrics_path: "/probe"
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: instance
- target_label: __address__
replacement: "localhost:9115"
- job_name: slurm
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/slurm.yml"
- job_name: "blackbox_ssh"
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/blackbox_ssh.yml"
params:
module: [ssh_banner]
metrics_path: "/probe"
relabel_configs:
- source_labels: [__address__]
regex: (.*?)(:.*)?
replacement: ${1}:22
target_label: __param_target
- source_labels: [__param_target]
target_label: instance
- target_label: __address__
replacement: "localhost:9115"
- job_name: "blackbox_dns"
file_sd_configs:
- files:
- "{{ prometheus_config_dir }}/file_sd/blackbox_dns.yml"
params:
module: [dns_test]
metrics_path: "/probe"
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: instance
- target_label: __address__
replacement: "localhost:9115"
    # Install and configure the blackbox exporter
- role: prometheus.prometheus.blackbox_exporter
vars:
blackbox_exporter_configuration_modules:
icmp:
prober: icmp
timeout: 5s
icmp:
preferred_ip_protocol: ip4
http_2xx:
prober: http
timeout: 5s
http:
method: GET
valid_status_codes: []
ssh_banner:
prober: tcp
timeout: 5s
tcp:
query_response:
- expect: "^SSH-2.0-"
dns_test:
prober: dns
timeout: 5s
dns:
transport_protocol: "udp"
preferred_ip_protocol: "ip4"
query_name: "{{ blackbox_dns_query }}"
query_type: "A"
valid_rcodes:
- NOERROR
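        # The modules above can be exercised by hand once the exporter is up, e.g.
        # (assuming the default listen port 9115):
        #   curl 'http://localhost:9115/probe?module=icmp&target=8.8.8.8'
        #   curl 'http://localhost:9115/probe?module=dns_test&target=8.8.8.8'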
# Main grafana install and configuration
- role: grafana.grafana.grafana
vars:
grafana_alerting: {}
grafana_security:
admin_user: admin
admin_password: "{{ grafana_password }}"
grafana_datasources:
- name: prometheus
type: prometheus
# Assuming grafana is on the same host this should work
url: 'http://localhost:9090'
grafana_dashboards: "{{ grafana_dashboard_list }}"
grafana_server:
enforce_domain: false
socket: ""
enable_gzip: false
static_root_path: public
router_logging: false
serve_from_sub_path: false
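        # A minimal post-install check for Grafana (assuming its default port 3000):
        #   curl -s http://localhost:3000/api/health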
    # There is now an RPM for the InfiniBand exporter; the role will need to be rewritten to use it.
    # Also, if the monitoring server is a VM, the exporter may need to be moved to a different host.
- prometheus_infiniband_exporter
# This is run here so that it is downloaded after the Prometheus directories are created
- name: Copy down alert rules
hosts: monitoring
tasks:
- ansible.builtin.get_url:
url: https://raw.githubusercontent.com/dstdev/ansible-roles/development/prometheus-server/files/alert-dst.rules
dest: /etc/prometheus/rules/alert-dst.rules
owner: root
group: root
notify:
- Reload Prometheus
handlers:
- name: Reload Prometheus
ansible.builtin.service:
state: reloaded
name: prometheus
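# The downloaded rules can be validated by hand (assuming promtool is available on the
# monitoring host):
#   promtool check rules /etc/prometheus/rules/alert-dst.rules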
# Slurm may not be running on the monitoring node, so target the headnodes specifically
- name: Install slurm exporter on headnodes
hosts: headnodes
roles:
- prometheus_slurm_exporter
- name: Install zfs exporter on defined storage nodes
hosts: zfs
roles:
- prometheus_zfs_exporter
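# With the vaulted secrets in place, one way to run this playbook (inventory path is a
# hypothetical example):
#   ansible-playbook -i inventory/hosts.ini monitoring.yaml --ask-vault-pass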