|
#!/bin/bash

#
# This script is to create a dev namespace on Staging environment
#
# NOTE: the <% ... %> placeholders below are substituted by the project
# template engine before this script is run; the raw file is not valid
# shell until rendered.
PROJECT_NAME=<% .Name %>
ENVIRONMENT=stage
ACCOUNT_ID=<% index .Params `accountId` %>
REGION=<% index .Params `region` %>
| 10 | + |
# common functions
function usage() {
  # Print invocation help to stdout and abort with exit status 1.
  printf '\n'
  printf '%s\n' "Usage:"
  printf '%s\n' " $0 <project id>"
  printf '%s\n' " - project id: can be 001, 002, or whatever id without space"
  exit 1
}
| 19 | + |
function command_exist() {
  # Return 0 when the named command is resolvable on PATH, non-zero otherwise.
  # $1 - command name to look up.
  # Quoted argument and POSIX-portable redirection (the original used the
  # bash-only `>&` form and an unquoted expansion).
  command -v "$1" >/dev/null 2>&1
}
| 23 | + |
function error_exit() {
  # Print an error diagnostic and abort the whole script.
  # $1 - human-readable error message.
  # Diagnostics go to stderr (the original wrote to stdout, polluting any
  # captured output); exit status 2 distinguishes errors from usage (1).
  echo "ERROR : $1" >&2
  exit 2
}
| 28 | + |
function can_i() {
  # Verify the current user holds every RBAC permission in the
  # comma-separated list $1 (e.g. "create namespace,create deployment").
  # Exits via error_exit when any check fails; returns 0 otherwise.
  # Globals read: CLUSTER_CONTEXT.
  local commands=$1
  local -a array
  IFS=',' read -r -a array <<< "$commands"
  local err=0
  local command
  for command in "${array[@]}"
  do
    # $command stays unquoted on purpose: "create namespace" must split into
    # the verb and resource arguments of `kubectl auth can-i`.
    # Use a { } group, NOT a ( ) subshell: the original subshell lost the
    # err increment, so failures were printed but never counted and the
    # error_exit below could never fire.
    kubectl --context "${CLUSTER_CONTEXT}" auth can-i $command >/dev/null 2>&1 || {
      echo "No permission to '$command'"
      err=$((err + 1))
    }
  done

  [[ $err -gt 0 ]] && error_exit "Found $err permission errors. Please check with your administrator."

  echo "Permission checks: passed"
  return 0
}
| 43 | + |
# Start
# Validate current iam user: the caller's IAM username (last path segment of
# the STS caller ARN) must belong to the project's developer group.
MY_USERNAME=$(aws sts get-caller-identity --output json | jq -r .Arn | cut -d/ -f2)
[[ -n "${MY_USERNAME}" ]] || error_exit "Unable to determine current IAM user."
DEV_USERS=$(aws iam get-group --group-name "${PROJECT_NAME}-developer-${ENVIRONMENT}" | jq -r .Users[].UserName)
# Exact, whole-line match against the newline-separated user list; the
# previous substring test (=~) would accept e.g. "bob" when only "bobby"
# is in the group.
printf '%s\n' "${DEV_USERS}" | grep -Fxq -- "${MY_USERNAME}" || error_exit "You (${MY_USERNAME}) are not in the ${PROJECT_NAME}-developer-${ENVIRONMENT} IAM group."

# Optional per-developer project suffix (first CLI argument, e.g. "001").
DEV_PROJECT_ID=${1:-""}
| 51 | + |
echo '[Dev Environment]'

# Validate cluster
# Context name follows the <project>-<env>-<region> convention used by the
# provisioning tooling — NOTE(review): assumed to match the kubeconfig
# context names; confirm against `kubectl config get-contexts`.
CLUSTER_CONTEXT=${PROJECT_NAME}-${ENVIRONMENT}-${REGION}
echo " Cluster context: ${CLUSTER_CONTEXT}"

# Validate secret
NAMESPACE=${PROJECT_NAME}
SECRET_NAME=${PROJECT_NAME}
DEV_SECRET_NAME=devenv${PROJECT_NAME}
# Fetch the pre-provisioned dev secret as JSON; an empty capture means the
# lookup failed (kubectl writes its own error to stderr), i.e. the secret
# was never created in the shared namespace.
DEV_SECRET_JSON=$(kubectl --context ${CLUSTER_CONTEXT} get secret ${DEV_SECRET_NAME} -n ${NAMESPACE} -o json)
[[ -z "${DEV_SECRET_JSON}" ]] && error_exit "The secret ${DEV_SECRET_NAME} is not existing in namespace '${NAMESPACE}'."
| 64 | + |
# Check installations of required local tooling.
# Each tool is checked directly; the original nested logic tested
# `command_exist kubectl` while reporting about telepresence, so a missing
# telepresence binary was never actually detected.
if ! command_exist kustomize; then
  error_exit "command 'kustomize' not found: please visit https://kubectl.docs.kubernetes.io/installation/kustomize/"
fi
if ! command_exist telepresence; then
  error_exit "command 'telepresence' not found. You can download it at https://www.telepresence.io/reference/install"
fi
| 74 | + |
# Setup dev namespace (one per developer: <iam username><optional project id>)
DEV_NAMESPACE=${MY_USERNAME}${DEV_PROJECT_ID}
if ! kubectl --context "${CLUSTER_CONTEXT}" get namespace "${DEV_NAMESPACE}" >/dev/null 2>&1; then
  # Run the permission check + create in the CURRENT shell: the original
  # ( ... ) subshell swallowed error_exit's `exit 2`, so the script kept
  # going even when the user lacked permissions.
  can_i "create namespace,create deployment,create ingress,create service,create secret,create configmap" && \
    kubectl --context "${CLUSTER_CONTEXT}" create namespace "${DEV_NAMESPACE}"
fi
echo " Namespace: ${DEV_NAMESPACE}"
| 81 | + |
# Setup dev secret from pre-configured one: strip namespace-specific
# metadata, rename devenv<project> -> <project>, apply into the dev namespace.
if ! kubectl --context "${CLUSTER_CONTEXT}" get secret "${SECRET_NAME}" -n "${DEV_NAMESPACE}" >/dev/null 2>&1; then
  # The JSON must be quoted: the original unquoted `echo ${DEV_SECRET_JSON}`
  # word-split the payload and glob-expanded any * or ? inside it.
  printf '%s' "${DEV_SECRET_JSON}" | \
    jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' | \
    sed "s/${DEV_SECRET_NAME}/${SECRET_NAME}/g" | \
    kubectl --context "${CLUSTER_CONTEXT}" apply -n "${DEV_NAMESPACE}" -f -
fi
echo " Secret: ${SECRET_NAME}"
| 86 | + |
# Setup dev service account from pre-configured one
SERVICE_ACCOUNT=backend-service
# Clone the SA from the shared namespace if it is not already present in the
# dev namespace; namespace-specific metadata is stripped so `apply` can
# recreate it cleanly under the new namespace.
kubectl --context ${CLUSTER_CONTEXT} get sa ${SERVICE_ACCOUNT} -n ${DEV_NAMESPACE} >& /dev/null || \
  kubectl --context ${CLUSTER_CONTEXT} get sa ${SERVICE_ACCOUNT} -n ${NAMESPACE} -o json | jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' | kubectl --context ${CLUSTER_CONTEXT} apply -n ${DEV_NAMESPACE} -f -
| 91 | + |
# Setup dev k8s manifests, configuration, docker login etc
CONFIG_ENVIRONMENT="dev"
# Rendered by the template engine: subdomain + host root concatenated
# (NOTE(review): assumes the separator dot is part of one of the params —
# confirm against the rendered output).
EXT_HOSTNAME=<% index .Params `stagingBackendSubdomain` %><% index .Params `stagingHostRoot` %>
# Per-developer external hostname, e.g. alice001-api.example.com.
MY_EXT_HOSTNAME=${DEV_NAMESPACE}-${EXT_HOSTNAME}
ECR_REPO=${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${PROJECT_NAME}
VERSION_TAG=latest
DATABASE_NAME=<% index .Params `databaseName` %>
# Per-developer database name: alphanumeric only ("dev" + username with all
# punctuation stripped), since identifiers with punctuation are not portable.
DEV_DATABASE_NAME=$(echo "dev${MY_USERNAME}" | tr -dc 'A-Za-z0-9')
echo " Domain: ${MY_EXT_HOSTNAME}"
echo " Database Name: ${DEV_DATABASE_NAME}"
| 102 | + |
# Apply migration
MIGRATION_NAME=${PROJECT_NAME}-migration
SQL_DIR="${PWD}/database/migration"
## launch migration job
# Build one --from-file flag per migration file: a single --from-file
# followed by an unquoted glob hands kubectl the extra matches as stray
# positional arguments, which fails whenever more than one .sql file exists.
SQL_FILE_ARGS=()
for sql_file in "${SQL_DIR}"/*.sql; do
  SQL_FILE_ARGS+=(--from-file "${sql_file}")
done
# Rewrite the database name in the job manifest to this developer's DB, then
# launch the migration job. error_exit inside the ( ) only exits the subshell;
# the trailing || error_exit catches that and aborts the script.
(cd kubernetes/migration && \
  kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} create configmap ${MIGRATION_NAME} "${SQL_FILE_ARGS[@]}" || error_exit "Failed to apply kubernetes migration configmap" && \
  sed "s|/${DATABASE_NAME}|/${DEV_DATABASE_NAME}|g" job.yml | \
  kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} create -f - ) || error_exit "Failed to apply kubernetes migration"
## confirm migration job done: wait up to 3 minutes for completion, dumping
## job details on failure (the namespace is intentionally left for debugging)
if ! kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} wait --for=condition=complete --timeout=180s job/${MIGRATION_NAME} ; then
  echo "${MIGRATION_NAME} run failed:"
  kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} describe job ${MIGRATION_NAME}
  error_exit "Failed migration. Leaving namespace ${DEV_NAMESPACE} for debugging"
fi
| 118 | + |
# Apply manifests
# Render the kustomize overlay for the dev config environment, rewrite the
# shared staging hostname and database name to this developer's values, and
# apply everything into the dev namespace. Any stage failing aborts the
# script via the trailing || error_exit.
(cd kubernetes/overlays/${CONFIG_ENVIRONMENT} && \
  kustomize build . | \
  sed "s|${EXT_HOSTNAME}|${MY_EXT_HOSTNAME}|g" | \
  sed "s|DATABASE_NAME: ${DATABASE_NAME}|DATABASE_NAME: ${DEV_DATABASE_NAME}|g" | \
  kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} apply -f - ) || error_exit "Failed to apply kubernetes manifests"
| 125 | + |
# Confirm deployment
# Wait up to 3 minutes for the rollout to finish; on failure, dump the
# deployment, replicaset and pod details to aid debugging, then abort
# (the namespace is intentionally left in place).
if ! kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} rollout status deployment/${PROJECT_NAME} -w --timeout=180s ; then
  echo "${PROJECT_NAME} rollout check failed:"
  echo "${PROJECT_NAME} deployment:"
  kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} describe deployment ${PROJECT_NAME}
  echo "${PROJECT_NAME} replicaset:"
  kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} describe rs -l app=${PROJECT_NAME}
  echo "${PROJECT_NAME} pods:"
  kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} describe pod -l app=${PROJECT_NAME}
  error_exit "Failed deployment. Leaving namespace ${DEV_NAMESPACE} for debugging"
fi
| 137 | + |
# Verify until the ingress DNS gets ready
echo
if nslookup "${MY_EXT_HOSTNAME}" >/dev/null 2>&1; then
  echo " Notice: your domain is ready to use."
else
  echo " Notice: the first time you use this environment it may take up to 5 minutes for DNS to propagate before the hostname is available."
  # Poll in the background so the telepresence shell below can start right
  # away. Bounded (30 tries x 30s = 15 min): the original `while ! nslookup`
  # loop was unbounded and could keep running forever after the script exits.
  (
    for ((attempt = 0; attempt < 30; attempt++)); do
      if nslookup "${MY_EXT_HOSTNAME}" >/dev/null 2>&1; then
        echo
        echo " Notice: your domain ${MY_EXT_HOSTNAME} is ready to use."
        exit 0
      fi
      sleep 30
    done
  ) &
fi
| 146 | + |
# Starting telepresence shell
echo
echo "Now you are ready to access your service at:"
echo
echo " https://${MY_EXT_HOSTNAME}"
echo
# printf interprets the \n escape; the original `echo -n "...\n..."` (no -e)
# printed a literal backslash-n in the middle of the message and suppressed
# the trailing newline.
printf 'Your telepresence dev environment is now loading which will proxy all the requests and environment variables from the cloud EKS cluster to the local shell.\nNote that the above URL access will get a "502 Bad Gateway" error until you launch the service in the shell, at which point it will start receiving traffic.\n'
echo
| 155 | + |
# Starting dev environment with telepresence shell
echo
# Swap the cluster deployment for a local shell: telepresence proxies the
# cluster traffic on port 80 plus the deployment's environment into the
# interactive shell. The script blocks here until the developer exits that
# shell, after which cleanup below runs.
telepresence --context ${CLUSTER_CONTEXT} --swap-deployment ${PROJECT_NAME} --namespace ${DEV_NAMESPACE} --expose 80 --run-shell
| 159 | + |
# Ending dev environment
## delete the most of resources (except ingress related, as we hit rate limit of certificate issuer(letsencrypt)
echo
# Remove the migration job/configmap explicitly, then bulk-delete the
# remaining workload resources. Ingress + certificate objects are kept on
# purpose so TLS certs are not re-requested every session (letsencrypt rate
# limits). All expansions are quoted (the originals were bare).
kubectl --context "${CLUSTER_CONTEXT}" -n "${DEV_NAMESPACE}" delete job "${MIGRATION_NAME}"
kubectl --context "${CLUSTER_CONTEXT}" -n "${DEV_NAMESPACE}" delete cm "${MIGRATION_NAME}"
for resource in hpa deployments services jobs pods cronjob; do
  kubectl --context "${CLUSTER_CONTEXT}" -n "${DEV_NAMESPACE}" delete "${resource}" --all
done
echo "Your dev environment resources under namespace ${DEV_NAMESPACE} have been deleted"
echo
0 commit comments