-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathdeploy_script_exporter.sh
More file actions
executable file
·96 lines (80 loc) · 4.16 KB
/
deploy_script_exporter.sh
File metadata and controls
executable file
·96 lines (80 loc) · 4.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
#!/bin/bash
#
# Deploys the script_exporter support container to a dedicated GCE instance.
#
# Strict mode: exit on any unhandled error (-e), treat unset variables as
# errors (-u), trace every command for the CI log (-x), and make a pipeline
# fail when any of its stages fails (pipefail) — without pipefail a failing
# gcloud inside a pipe would go unnoticed under -e.
set -eux -o pipefail
# These variables should not change much
USAGE="Usage: $0 <project> <keyname>"
# Required positional arguments: abort immediately with a usage message if
# either is missing (the ":?" expansion exits the script).
PROJECT=${1:?Please provide project name: $USAGE}
KEYNAME=${2:?Please provide an authentication key name: $USAGE}
# Space-separated list of files copied to the GCE instance. Kept as a plain
# string (not an array) because later steps expand it unquoted on purpose so
# each name becomes its own argument.
readonly SCP_FILES="apply_tc_rules.sh Dockerfile ndt_e2e.sh script_exporter.yml"
# Docker image tag built and run on the instance.
readonly IMAGE_TAG="m-lab/script-exporter-support"
# GCE location and naming. GCE_IP_NAME must refer to an existing static
# external IP reservation in the project.
readonly GCE_ZONE="us-central1-a"
readonly GCE_NAME="script-exporter"
readonly GCE_IP_NAME="script-exporter-public-ip"
# Boot image: latest stable CoreOS.
readonly GCE_IMG_PROJECT="coreos-cloud"
readonly GCE_IMG_FAMILY="coreos-stable"
# The script_exporter targets for each project only include the nodes specific
# to that project. That is, the mlab-sandbox project will only have targets for
# testing nodes, which is a small number. And the mlab-staging project will
# only have targets for mlab4 nodes, which is more nodes than the mlab-sandbox
# project. The mlab-oti project will have significantly more targets, all
# mlab[1-3]s, than the other projects. Because of this, the demands on CPU and
# memory will vary. This case statement allows us to set per project GCE
# instance machine types to account for expected load.
case "$PROJECT" in
  # Both heavy projects get the same large machine type; keep them in a
  # single arm so they cannot drift apart accidentally.
  mlab-oti|mlab-staging)
    MACHINE_TYPE="n1-standard-8"
    ;;
  # Everything else (e.g. mlab-sandbox) has few targets.
  *)
    MACHINE_TYPE="n1-standard-1"
    ;;
esac
# Add gcloud to PATH.
source "${HOME}/google-cloud-sdk/path.bash.inc"
# Add m-lab/travis helper library (provides activate_service_account).
source "${TRAVIS_BUILD_DIR}/travis/gcloudlib.sh"
# Set the project and zone for all future gcloud commands. Quote the
# expansions so unexpected whitespace can never split an argument.
gcloud config set project "${PROJECT}"
gcloud config set compute/zone "${GCE_ZONE}"
# Authenticate the service account using KEYNAME.
activate_service_account "${KEYNAME}"
# Make sure that the files we want to copy actually exist before touching any
# cloud resources. ${SCP_FILES} is expanded unquoted on purpose: it is a
# space-separated list and each entry must become its own word.
for scp_file in ${SCP_FILES}; do
  if [[ ! -e "${TRAVIS_BUILD_DIR}/${scp_file}" ]]; then
    # Diagnostics belong on stderr, not stdout.
    echo "Missing required file/dir: ${TRAVIS_BUILD_DIR}/${scp_file}!" >&2
    exit 1
  fi
done
# Delete the existing GCE instance, if it exists. gcloud has an exit status of 0
# whether any instances are found or not. When no instances are found, a short
# message is echoed to stderr. When an instance is found a summary is echoed to
# stdout. If $EXISTING_INSTANCE is not null then we infer that the instance
# already exists.
EXISTING_INSTANCE=$(gcloud compute instances list --filter "name=${GCE_NAME}")
if [[ -n "${EXISTING_INSTANCE}" ]]; then
  # --quiet suppresses the interactive confirmation prompt (required in CI).
  gcloud compute instances delete "${GCE_NAME}" --quiet
fi
# Create the new GCE instance. NOTE: $GCE_IP_NAME *must* refer to an existing
# static external IP address for the project. cloud-config.yml (user-data) is
# read from the current working directory.
gcloud compute instances create "${GCE_NAME}" --address "${GCE_IP_NAME}" \
  --image-project "${GCE_IMG_PROJECT}" --image-family "${GCE_IMG_FAMILY}" \
  --tags "${GCE_NAME}" --metadata-from-file user-data=cloud-config.yml \
  --machine-type "${MACHINE_TYPE}"
# Give the GCE instance another 30s to fully become available. From time to time
# the Travis-CI build fails because it can't connect via SSH.
sleep 30
# Get the internal VPC IP of the new instance. The exporters below bind to
# this address so they are only reachable inside the VPC.
INTERNAL_IP=$(gcloud compute instances list \
    --format="value(networkInterfaces[0].networkIP)" \
    --filter="name=${GCE_NAME}")
# Copy required script_exporter files to the GCE instance. ${SCP_FILES} is
# expanded unquoted on purpose so each file becomes a separate scp argument.
gcloud compute scp $SCP_FILES "${GCE_NAME}:~"
# Build the script_exporter support Docker image on the instance.
gcloud compute ssh "${GCE_NAME}" --command "docker build -t ${IMAGE_TAG} ."
# Start a new container based on the new/updated image. NET_ADMIN is required
# so apply_tc_rules.sh can manipulate traffic-control settings.
gcloud compute ssh "${GCE_NAME}" --command "docker run --detach --restart always --publish ${INTERNAL_IP}:9172:9172 --name ${GCE_NAME} --cap-add NET_ADMIN ${IMAGE_TAG}"
# Run Prometheus node_exporter in a container so we can gather VM metrics.
# Most collectors are disabled; only the ones we chart are left enabled.
gcloud compute ssh "${GCE_NAME}" --command "docker run --detach --restart always --publish ${INTERNAL_IP}:9100:9100 --name node-exporter --volume /proc:/host/proc --volume /sys:/host/sys prom/node-exporter --path.procfs /host/proc --path.sysfs /host/sys --no-collector.arp --no-collector.bcache --no-collector.conntrack --no-collector.edac --no-collector.entropy --no-collector.filefd --no-collector.hwmon --no-collector.infiniband --no-collector.ipvs --no-collector.mdadm --no-collector.netstat --no-collector.sockstat --no-collector.time --no-collector.timex --no-collector.uname --no-collector.vmstat --no-collector.wifi --no-collector.xfs --no-collector.zfs"