Skip to content

Commit b0b3d9e

Browse files
committed
cherry-pick: porting over dev environment (#15)
1 parent d62de3e commit b0b3d9e

File tree

7 files changed

+277
-0
lines changed

7 files changed

+277
-0
lines changed

README.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,11 @@ This repository is language/business-logic agnostic; mainly showcasing some univ
4444
| | | |-- ingress.yml
4545
| | | |-- kustomization.yml
4646
| | | |-- pdb.yml
47+
| | |-- dev/
48+
| | | |-- deployment.yml
49+
| | | |-- ingress.yml
50+
| | | |-- kustomization.yml
51+
| | | |-- pdb.yml
4752
| |-- secrets/
4853
| | |-- .gitignore
4954
| | |-- kustomization.yml

templates/.circleci/config.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -282,6 +282,9 @@ jobs:
282282
kubectl -n $NAMESPACE describe pod -l app=$DEPLOYMENT
283283
exit 1
284284
fi
285+
286+
MANIFEST=$(aws ecr batch-get-image --region << parameters.region >> --repository-name << parameters.repo >> --image-ids imageTag=${VERSION_TAG} --query 'images[].imageManifest' --output text)
287+
aws ecr put-image --region << parameters.region >> --repository-name << parameters.repo >> --image-tag last-deployed --image-manifest "$MANIFEST"
285288
workflows:
286289
version: 2
287290
# The main workflow. Check out the code, build it, push it, deploy to staging, test, deploy to production

templates/README.md

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,21 @@ kubectl -n <% .Name %> get pods
1212
### Configuring
1313
You can update the resource limits in the [kubernetes/base/deployment.yml][base-deployment], and control fine-grain customizations based on environment and specific deployments such as Scaling out your production replicas from the [overlays configurations][env-prod]
1414

15+
### Dev Environment
16+
This project is set up with a local/cloud hybrid dev environment. This means you can do fast local development of a single service, even if that service depends on other resources in your cluster.
17+
Make a change to your service, run it, and you can immediately see the new service in action in a real environment. You can also use any tools like your local IDE, debugger, etc. to test/debug/edit/run your service.
18+
19+
Usually when developing you would run the service locally with a local database and any other dependencies running either locally or in containers using `docker-compose`, `minikube`, etc.
20+
Now your service will have access to any dependencies within a namespace running in the EKS cluster, with access to resources there.
21+
[Telepresence](https://telepresence.io) is used to provide this functionality.
22+
23+
Development workflow:
24+
25+
1. Run `start-dev-env.sh` - You will be dropped into a shell that is the same as your local machine, but works as if it were running inside a pod in your k8s cluster
26+
2. Change code and run the server - As you run your local server, using local code, it will have access to remote dependencies, and will be sent traffic by the load balancer
27+
3. Test on your cloud environment with real dependencies - `https://<your name>-<% index .Params `stagingBackendSubdomain` %><% index .Params `stagingHostRoot` %>`
28+
4. git commit & auto-deploy to Staging through the build pipeline
29+
1530
## Circle CI
1631
Your repository comes with an end-to-end CI/CD pipeline, which includes the following steps:
1732
1. Checkout
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
apiVersion: apps/v1
2+
kind: Deployment
3+
metadata:
4+
name: <% .Name %>
5+
spec:
6+
template:
7+
spec:
8+
containers:
9+
- name: <% .Name %>
10+
image: <% index .Params `accountId` %>.dkr.ecr.<% index .Params `region` %>.amazonaws.com/<% .Name %>:last-deployed
11+
resources:
12+
requests:
13+
memory: 64Mi
14+
cpu: 0.1
15+
limits:
16+
memory: 256Mi
17+
cpu: 1.0
Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
apiVersion: extensions/v1beta1
2+
kind: Ingress
3+
metadata:
4+
name: <% .Name %>
5+
annotations:
6+
# nginx ingress
7+
kubernetes.io/ingress.class: nginx
8+
nginx.ingress.kubernetes.io/rewrite-target: /$1
9+
# cert-manager
10+
ingress.kubernetes.io/ssl-redirect: "true"
11+
cert-manager.io/cluster-issuer: clusterissuer-letsencrypt-production
12+
# CORS
13+
nginx.ingress.kubernetes.io/enable-cors: "true"
14+
## to support both frontend origin and 'localhost', need 'configuration-snippet' implementation here, because 'cors-allow-origin' field doesn't support multiple originss yet.
15+
nginx.ingress.kubernetes.io/configuration-snippet: |
16+
if ($http_origin ~* "^https?://((?:<% index .Params `stagingFrontendSubdomain` %><% index .Params `stagingHostRoot` %>)|(?:localhost))") {
17+
set $cors "true";
18+
}
19+
if ($request_method = 'OPTIONS') {
20+
set $cors "${cors}options";
21+
}
22+
23+
if ($cors = "true") {
24+
add_header 'Access-Control-Allow-Origin' "$http_origin" always;
25+
add_header 'Access-Control-Allow-Methods' 'GET, PUT, POST, DELETE, PATCH, OPTIONS' always;
26+
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization' always;
27+
add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always;
28+
}
29+
30+
if ($cors = "trueoptions") {
31+
add_header 'Access-Control-Allow-Origin' "$http_origin";
32+
add_header 'Access-Control-Allow-Methods' 'GET, PUT, POST, DELETE, PATCH, OPTIONS';
33+
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
34+
add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range';
35+
add_header 'Access-Control-Max-Age' 1728000;
36+
add_header 'Content-Type' 'text/plain charset=UTF-8';
37+
add_header 'Content-Length' 0;
38+
return 204;
39+
}
40+
41+
spec:
42+
rules:
43+
- host: <% index .Params `stagingBackendSubdomain` %><% index .Params `stagingHostRoot` %>
44+
http:
45+
paths:
46+
- path: /(.*)
47+
backend:
48+
serviceName: <% .Name %>
49+
servicePort: http
50+
tls:
51+
- hosts:
52+
- <% index .Params `stagingBackendSubdomain` %><% index .Params `stagingHostRoot` %>
53+
secretName: <% .Name %>-tls-secret
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
apiVersion: kustomize.config.k8s.io/v1beta1
2+
kind: Kustomization
3+
4+
patchesStrategicMerge:
5+
- deployment.yml
6+
7+
resources:
8+
- ../../base
9+
- ingress.yml
10+
11+
configMapGenerator:
12+
- name: <% .Name %>-config
13+
behavior: merge
14+
literals:
15+
- ENVIRONMENT=staging

templates/start-dev-env.sh

Lines changed: 169 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,169 @@
1+
#!/bin/bash
2+
3+
#
4+
# This script is to create a dev namespace on Staging environment
5+
#
6+
PROJECT_NAME=<% .Name %>
7+
ENVIRONMENT=stage
8+
ACCOUNT_ID=<% index .Params `accountId` %>
9+
REGION=<% index .Params `region` %>
10+
11+
# common functions
12+
function usage() {
13+
echo
14+
echo "Usage:"
15+
echo " $0 <project id>"
16+
echo " - project id: can be 001, 002, or whatever id without space"
17+
exit 1
18+
}
19+
20+
function command_exist() {
21+
command -v ${1} >& /dev/null
22+
}
23+
24+
function error_exit() {
25+
echo "ERROR : $1"
26+
exit 2
27+
}
28+
29+
function can_i() {
30+
commands=$1
31+
IFS=',' read -r -a array <<< "$commands"
32+
err=0
33+
for command in "${array[@]}"
34+
do
35+
kubectl --context ${CLUSTER_CONTEXT} auth can-i $command >& /dev/null || (echo "No permission to '$command'" && let "err+=1")
36+
done
37+
38+
[[ $err -gt 0 ]] && error_exit "Found $err permission errors. Please check with your administrator."
39+
40+
echo "Permission checks: passed"
41+
return 0
42+
}
43+
44+
# Start
45+
# Validate current iam user
46+
MY_USERNAME=$(aws sts get-caller-identity --output json | jq -r .Arn | cut -d/ -f2)
47+
DEV_USERS=$(aws iam get-group --group-name ${PROJECT_NAME}-developer-${ENVIRONMENT} | jq -r .Users[].UserName)
48+
[[ "${DEV_USERS[@]}" =~ "${MY_USERNAME}" ]] || error_exit "You (${MY_USERNAME}) are not in the ${PROJECT_NAME}-developer-${ENVIRONMENT} IAM group."
49+
50+
DEV_PROJECT_ID=${1:-""}
51+
52+
echo '[Dev Environment]'
53+
54+
# Validate cluster
55+
CLUSTER_CONTEXT=${PROJECT_NAME}-${ENVIRONMENT}-${REGION}
56+
echo " Cluster context: ${CLUSTER_CONTEXT}"
57+
58+
# Validate secret
59+
NAMESPACE=${PROJECT_NAME}
60+
SECRET_NAME=${PROJECT_NAME}
61+
DEV_SECRET_NAME=devenv${PROJECT_NAME}
62+
DEV_SECRET_JSON=$(kubectl --context ${CLUSTER_CONTEXT} get secret ${DEV_SECRET_NAME} -n ${NAMESPACE} -o json)
63+
[[ -z "${DEV_SECRET_JSON}" ]] && error_exit "The secret ${DEV_SECRET_NAME} is not existing in namespace '${NAMESPACE}'."
64+
65+
# Check installations
66+
if ! command_exist kustomize || ! command_exist telepresence; then
67+
if ! command_exist kustomize; then
68+
error_exit "command 'kustomize' not found: please visit https://kubectl.docs.kubernetes.io/installation/kustomize/"
69+
fi
70+
if ! command_exist kubectl; then
71+
error_exit "command 'telepresence' not found. You can download it at https://www.telepresence.io/reference/install"
72+
fi
73+
fi
74+
75+
# Setup dev namepsace
76+
DEV_NAMESPACE=${MY_USERNAME}${DEV_PROJECT_ID}
77+
kubectl --context ${CLUSTER_CONTEXT} get namespace ${DEV_NAMESPACE} >& /dev/null || \
78+
(can_i "create namespace,create deployment,create ingress,create service,create secret,create configmap" && \
79+
kubectl --context ${CLUSTER_CONTEXT} create namespace ${DEV_NAMESPACE})
80+
echo " Namespace: ${DEV_NAMESPACE}"
81+
82+
# Setup dev secret from pre-configed one
83+
kubectl --context ${CLUSTER_CONTEXT} get secret ${SECRET_NAME} -n ${DEV_NAMESPACE} >& /dev/null || \
84+
echo ${DEV_SECRET_JSON} | jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' | sed "s/${DEV_SECRET_NAME}/${SECRET_NAME}/g" | kubectl --context ${CLUSTER_CONTEXT} apply -n ${DEV_NAMESPACE} -f -
85+
echo " Secret: ${SECRET_NAME}"
86+
87+
# Setup dev service account from pre-configured one
88+
SERVICE_ACCOUNT=backend-service
89+
kubectl --context ${CLUSTER_CONTEXT} get sa ${SERVICE_ACCOUNT} -n ${DEV_NAMESPACE} >& /dev/null || \
90+
kubectl --context ${CLUSTER_CONTEXT} get sa ${SERVICE_ACCOUNT} -n ${NAMESPACE} -o json | jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' | kubectl --context ${CLUSTER_CONTEXT} apply -n ${DEV_NAMESPACE} -f -
91+
92+
# Setup dev k8s manifests, configuration, docker login etc
93+
CONFIG_ENVIRONMENT="dev"
94+
EXT_HOSTNAME=<% index .Params `stagingBackendSubdomain` %><% index .Params `stagingHostRoot` %>
95+
MY_EXT_HOSTNAME=${DEV_NAMESPACE}-${EXT_HOSTNAME}
96+
ECR_REPO=${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${PROJECT_NAME}
97+
VERSION_TAG=latest
98+
DATABASE_NAME=<% index .Params `databaseName` %>
99+
DEV_DATABASE_NAME=$(echo "dev${MY_USERNAME}" | tr -dc 'A-Za-z0-9')
100+
echo " Domain: ${MY_EXT_HOSTNAME}"
101+
echo " Database Name: ${DEV_DATABASE_NAME}"
102+
103+
# Apply migration
104+
MIGRATION_NAME=${PROJECT_NAME}-migration
105+
SQL_DIR="${PWD}/database/migration"
106+
## launch migration job
107+
(cd kubernetes/migration && \
108+
kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} create configmap ${MIGRATION_NAME} --from-file ${SQL_DIR}/*.sql || error_exit "Failed to apply kubernetes migration configmap" && \
109+
cat job.yml | \
110+
sed "s|/${DATABASE_NAME}|/${DEV_DATABASE_NAME}|g" | \
111+
kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} create -f - ) || error_exit "Failed to apply kubernetes migration"
112+
## confirm migration job done
113+
if ! kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} wait --for=condition=complete --timeout=180s job/${MIGRATION_NAME} ; then
114+
echo "${MIGRATION_NAME} run failed:"
115+
kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} describe job ${MIGRATION_NAME}
116+
error_exit "Failed migration. Leaving namespace ${DEV_NAMESPACE} for debugging"
117+
fi
118+
119+
# Apply manifests
120+
(cd kubernetes/overlays/${CONFIG_ENVIRONMENT} && \
121+
kustomize build . | \
122+
sed "s|${EXT_HOSTNAME}|${MY_EXT_HOSTNAME}|g" | \
123+
sed "s|DATABASE_NAME: ${DATABASE_NAME}|DATABASE_NAME: ${DEV_DATABASE_NAME}|g" | \
124+
kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} apply -f - ) || error_exit "Failed to apply kubernetes manifests"
125+
126+
# Confirm deployment
127+
if ! kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} rollout status deployment/${PROJECT_NAME} -w --timeout=180s ; then
128+
echo "${PROJECT_NAME} rollout check failed:"
129+
echo "${PROJECT_NAME} deployment:"
130+
kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} describe deployment ${PROJECT_NAME}
131+
echo "${PROJECT_NAME} replicaset:"
132+
kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} describe rs -l app=${PROJECT_NAME}
133+
echo "${PROJECT_NAME} pods:"
134+
kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} describe pod -l app=${PROJECT_NAME}
135+
error_exit "Failed deployment. Leaving namespace ${DEV_NAMESPACE} for debugging"
136+
fi
137+
138+
# Verify until the ingress DNS gets ready
139+
echo
140+
if nslookup ${MY_EXT_HOSTNAME} >& /dev/null; then
141+
echo " Notice: your domain is ready to use."
142+
else
143+
echo " Notice: the first time you use this environment it may take up to 5 minutes for DNS to propagate before the hostname is available."
144+
bash -c "while ! nslookup ${MY_EXT_HOSTNAME} >& /dev/null; do sleep 30; done; echo && echo \" Notice: your domain ${MY_EXT_HOSTNAME} is ready to use.\";" &
145+
fi
146+
147+
# Starting telepresence shell
148+
echo
149+
echo "Now you are ready to access your service at:"
150+
echo
151+
echo " https://${MY_EXT_HOSTNAME}"
152+
echo
153+
echo -n "Your telepresence dev environment is now loading which will proxy all the requests and environment variables from the cloud EKS cluster to the local shell.\nNote that the above URL access will get a \"502 Bad Gateway\" error until you launch the service in the shell, at which point it will start receiving traffic."
154+
echo
155+
156+
# Starting dev environment with telepresence shell
157+
echo
158+
telepresence --context ${CLUSTER_CONTEXT} --swap-deployment ${PROJECT_NAME} --namespace ${DEV_NAMESPACE} --expose 80 --run-shell
159+
160+
# Ending dev environment
161+
## delete the most of resources (except ingress related, as we hit rate limit of certificate issuer(letsencrypt)
162+
echo
163+
kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} delete job ${MIGRATION_NAME}
164+
kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} delete cm ${MIGRATION_NAME}
165+
for r in hpa deployments services jobs pods cronjob; do
166+
kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} delete $r --all
167+
done
168+
echo "Your dev environment resources under namespace ${DEV_NAMESPACE} have been deleted"
169+
echo

0 commit comments

Comments
 (0)