From c91f656722f370c0fd94fea167abcc3741d5dd28 Mon Sep 17 00:00:00 2001
From: sanjay7178
Date: Fri, 8 Aug 2025 23:44:42 +0530
Subject: [PATCH 1/3] feat(contour-bookinfo): Add Contour Ingress Gateway configuration for Bookinfo application

Signed-off-by: sanjay7178
---
 kind/bookinfo/contour_ingress/README.md       | 188 ++++
 .../contour_ingress/bookinfo-httproxy.yaml    |  15 +
 .../contour_ingress/contour-install.yaml      | 446 ++++++++++++++++++
 .../contour_ingress/contour-rbac.yaml         | 172 +++++++
 .../bookinfo/contour_ingress/productpage.yaml |  73 +++
 .../contour_ingress/serviceexports.yaml       |  44 ++
 kind/bookinfo/contour_ingress/slice.yaml      |  35 ++
 7 files changed, 973 insertions(+)
 create mode 100644 kind/bookinfo/contour_ingress/README.md
 create mode 100644 kind/bookinfo/contour_ingress/bookinfo-httproxy.yaml
 create mode 100644 kind/bookinfo/contour_ingress/contour-install.yaml
 create mode 100644 kind/bookinfo/contour_ingress/contour-rbac.yaml
 create mode 100644 kind/bookinfo/contour_ingress/productpage.yaml
 create mode 100644 kind/bookinfo/contour_ingress/serviceexports.yaml
 create mode 100644 kind/bookinfo/contour_ingress/slice.yaml

diff --git a/kind/bookinfo/contour_ingress/README.md b/kind/bookinfo/contour_ingress/README.md
new file mode 100644
index 0000000..50b4f5c
--- /dev/null
+++ b/kind/bookinfo/contour_ingress/README.md
@@ -0,0 +1,188 @@
+# Bookinfo with Contour Ingress Gateway
+
+This example demonstrates how to deploy the Istio Bookinfo application using Contour as the ingress gateway instead of an Istio Gateway or direct NodePort exposure.
+
+## Architecture
+
+```
+Internet -> Contour Envoy (NodePort) -> HTTPProxy -> ProductPage Service (ClusterIP) -> Backend Services (via KubeSlice)
+```
+
+## Table of Contents
+- [Bookinfo with Contour Ingress Gateway](#bookinfo-with-contour-ingress-gateway)
+  - [Architecture](#architecture)
+  - [Table of Contents](#table-of-contents)
+  - [Overview](#overview)
+  - [Prerequisites](#prerequisites)
+  - [Installation](#installation)
+    - [1. Setup Environment Variables](#1-setup-environment-variables)
+    - [2. Product Cluster Setup](#2-product-cluster-setup)
+    - [3. Services Cluster Setup](#3-services-cluster-setup)
+    - [4. KubeSlice Controller Setup](#4-kubeslice-controller-setup)
+  - [Testing the Application](#testing-the-application)
+    - [Option 1: Access via Contour Ingress](#option-1-access-via-contour-ingress)
+    - [Option 2: Port-Forward the Product Page](#option-2-port-forward-the-product-page)
+  - [Troubleshooting](#troubleshooting)
+    - [Verify Cross-Cluster Communication](#verify-cross-cluster-communication)
+    - [Verify KubeSlice Components](#verify-kubeslice-components)
+  - [Cleanup](#cleanup)
+
+## Overview
+
+The bookinfo application is deployed across two KubeSlice clusters:
+- **Product Cluster**: Contains the productpage service and the Contour ingress gateway
+- **Services Cluster**: Contains the details, reviews, and ratings services
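+
+Across the slice, productpage reaches the remote backends through KubeSlice service DNS names of the form `<service>.<namespace>.svc.slice.local`. The excerpt below is taken from `productpage.yaml` in this directory and shows how those names are wired in via environment variables:
+
+```yaml
+# Excerpt from productpage.yaml: the productpage container resolves its
+# backends via the KubeSlice DNS domain rather than svc.cluster.local.
+env:
+- name: REVIEWS_HOSTNAME
+  value: reviews.bookinfo.svc.slice.local
+- name: DETAILS_HOSTNAME
+  value: details.bookinfo.svc.slice.local
+```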
+
+## Prerequisites
+
+- KubeSlice installed and configured across the clusters
+- `kubectl` configured with the proper contexts
+- `kubectx` installed for context switching
+- Access to all three clusters (controller + 2 worker clusters)
+
+## Installation
+
+### 1. Setup Environment Variables
+
+```bash
+# Define cluster contexts and namespace
+export PRODUCT_CLUSTER="kind-ks-w-1" # Cluster for productpage and ingress
+export CONTROLLER_CLUSTER="kind-ks-ctrl" # KubeSlice controller cluster
+export SERVICES_CLUSTER="kind-ks-w-2" # Cluster for backend services
+export BOOKINFO_NAMESPACE="bookinfo" # Namespace for bookinfo components
+```
+
+### 2. Product Cluster Setup
+
+```bash
+# Switch to product cluster and create namespace
+kubectx $PRODUCT_CLUSTER
+kubectl create namespace $BOOKINFO_NAMESPACE
+
+# Install Contour ingress controller and its components
+kubectl apply -f https://projectcontour.io/quickstart/contour.yaml
+kubectl apply -f contour-install.yaml
+kubectl apply -f contour-rbac.yaml
+
+# Verify Contour is ready
+kubectl get pods -n projectcontour -o wide
+
+# Deploy productpage component
+kubectl apply -f productpage.yaml -n $BOOKINFO_NAMESPACE
+
+# Configure HTTP Proxy for ingress routing
+kubectl apply -f bookinfo-httproxy.yaml -n $BOOKINFO_NAMESPACE
+
+# Verify all pods are running
+kubectl get pods -n $BOOKINFO_NAMESPACE
+```
+
+### 3. Services Cluster Setup
+
+```bash
+# Switch to services cluster and create namespace
+kubectx $SERVICES_CLUSTER
+kubectl create namespace $BOOKINFO_NAMESPACE
+
+# Install backend services
+kubectl apply -f details.yaml -n $BOOKINFO_NAMESPACE
+kubectl apply -f ratings.yaml -n $BOOKINFO_NAMESPACE
+kubectl apply -f reviews.yaml -n $BOOKINFO_NAMESPACE
+
+# Create service exports for cross-cluster communication
+kubectl apply -f serviceexports.yaml -n $BOOKINFO_NAMESPACE
+
+# Verify service exports were created
+kubectl get serviceexport -n $BOOKINFO_NAMESPACE
+```
+
+### 4. KubeSlice Controller Setup
+
+Apply the slice configuration
+```bash
+kubectx $CONTROLLER_CLUSTER
+kubectl apply -f slice.yaml
+```
+
+## Testing the Application
+
+### Option 1: Access via Contour Ingress
+
+Get the Contour Envoy endpoint:
+```bash
+kubectx $PRODUCT_CLUSTER
+kubectl get -n projectcontour service envoy -o wide
+```
+Access the application through the Envoy NodePort. The HTTPProxy routes on the `bookinfo.local` virtual host, so include a matching Host header.
+Example: `curl -H "Host: bookinfo.local" http://<node-ip>:<node-port>/productpage`
+
+### Option 2: Port-Forward the Product Page
+
+```bash
+kubectx $PRODUCT_CLUSTER
+kubectl port-forward -n bookinfo svc/productpage 9080:9080
+```
+Test individual services:
+```bash
+curl http://localhost:9080/details/1
+curl http://localhost:9080/reviews/1
+curl http://localhost:9080/ratings/1
+```
+Test the main page:
+```bash
+curl http://localhost:9080/productpage
+```
+
+Or access it in a browser: http://localhost:9080/productpage
+
+## Troubleshooting
+
+### Verify Cross-Cluster Communication
+
+Check DNS resolution from the productpage pod:
+```bash
+kubectx $PRODUCT_CLUSTER
+PRODUCTPAGE_POD=$(kubectl get pods -n bookinfo -l app=productpage -o jsonpath='{.items[0].metadata.name}')
+kubectl exec -it -n bookinfo $PRODUCTPAGE_POD -c netshoot -- /bin/bash
+
+# From inside the pod, test DNS resolution of the slice service names
+nslookup details.bookinfo.svc.slice.local
+nslookup reviews.bookinfo.svc.slice.local
+nslookup ratings.bookinfo.svc.slice.local
+```
+
+### Verify KubeSlice Components
+
+Check slice gateway status:
+```bash
+kubectl get slicegw -A
+kubectl get workerslicegateway -n kubeslice-system
+```
+Verify DNS configuration for cross-cluster services
+```bash
+kubectl get configmap -n kubeslice-system kubeslice-dns -o yaml
+```
+
+## Cleanup
+
+```bash
+# Clean up services cluster
+kubectx $SERVICES_CLUSTER
+kubectl delete namespace $BOOKINFO_NAMESPACE
+
+# Clean up product cluster
+kubectx $PRODUCT_CLUSTER
+kubectl delete namespace $BOOKINFO_NAMESPACE
+kubectl delete -f contour-install.yaml
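+# If Contour was also installed from the quickstart manifest during setup,
+# remove its cluster-scoped resources (CRDs, cluster RBAC); deleting the
+# projectcontour namespace alone does not clean those up:
+kubectl delete -f https://projectcontour.io/quickstart/contour.yaml --ignore-not-found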
+kubectl delete -f contour-rbac.yaml +kubectl delete namespace projectcontour +``` \ No newline at end of file diff --git a/kind/bookinfo/contour_ingress/bookinfo-httproxy.yaml b/kind/bookinfo/contour_ingress/bookinfo-httproxy.yaml new file mode 100644 index 0000000..721b32f --- /dev/null +++ b/kind/bookinfo/contour_ingress/bookinfo-httproxy.yaml @@ -0,0 +1,15 @@ +# HTTPProxy configuration for bookinfo productpage +apiVersion: projectcontour.io/v1 +kind: HTTPProxy +metadata: + name: bookinfo-productpage + namespace: bookinfo +spec: + virtualhost: + fqdn: bookinfo.local + routes: + - conditions: + - prefix: / + services: + - name: productpage + port: 9080 \ No newline at end of file diff --git a/kind/bookinfo/contour_ingress/contour-install.yaml b/kind/bookinfo/contour_ingress/contour-install.yaml new file mode 100644 index 0000000..8ee5a21 --- /dev/null +++ b/kind/bookinfo/contour_ingress/contour-install.yaml @@ -0,0 +1,446 @@ +# Contour installation manifest +# This installs Contour ingress controller +apiVersion: v1 +kind: Namespace +metadata: + name: projectcontour +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: contour + namespace: projectcontour +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: envoy + namespace: projectcontour +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: contour + namespace: projectcontour +data: + contour.yaml: | + # + # server: + # determine which XDS Server implementation to utilize in Contour. + # xds-server-type: contour + # + # Specify the Gateway API configuration. + # gateway: + # controllerName: projectcontour.io/gateway-controller + # + # should contour expect to be running inside a k8s cluster + # incluster: true + # + # path to kubeconfig (if not running inside a k8s cluster) + # kubeconfig: /path/to/cluster/admin/kubeconfig + # + # Client request timeout to be passed to Envoy + # as the connection manager request_timeout. + # Defaults to 0, which Envoy interprets as disabled. + # Note that this is the timeout for the whole request, + # not an idle timeout. + # request-timeout: 10s + # + # disable ingressroute permitInsecure field + disablePermitInsecure: false + tls: + # minimum TLS version that Contour will negotiate + # minimum-protocol-version: "1.2" + # The following config shows the defaults for the leader election. + # leaderelection: + # configmap-name: leader-elect + # configmap-namespace: projectcontour + ### Logging options + # Default setting + accesslog-format: envoy + # To enable JSON logging in Envoy + # accesslog-format: json + # The default fields that will be logged are specified below. + # To customise this list, just add or remove entries. + # The canonical list is available at + # https://godoc.org/github.com/projectcontour/contour/internal/envoy#JSONFields + # json-fields: + # - "@timestamp" + # - "authority" + # - "bytes_received" + # - "bytes_sent" + # - "downstream_local_address" + # - "downstream_remote_address" + # - "duration" + # - "method" + # - "path" + # - "protocol" + # - "request_id" + # - "requested_server_name" + # - "response_code" + # - "response_flags" + # - "uber_trace_id" + # - "upstream_cluster" + # - "upstream_host" + # - "upstream_local_address" + # - "upstream_service_time" + # - "user_agent" + # - "x_forwarded_for" + # + # default-http-versions: + # This defines the default set of HTTPS versions the proxy should accept. + # HTTP versions are strings of the form "HTTP/xx". + # Supported versions are "HTTP/1.1" and "HTTP/2". 
+ # Values are stored in the order they are provided. + # Note: This only applies to secured requests. + # - "HTTP/2" + # - "HTTP/1.1" + # + # The following shows the default proxy timeout settings. + # timeouts: + # request-timeout: infinity + # connection-idle-timeout: 60s + # stream-idle-timeout: 5m + # max-connection-duration: infinity + # delayclose-timeout: 60s + # connection-shutdown-grace-period: 5s + # + # Envoy cluster settings. + # cluster: + # configure the cluster dns lookup family + # valid options are: auto (default), v4, v6 + # dns-lookup-family: auto + # + # Envoy network settings. + # network: + # Configure the number of additional ingress proxy hops from the + # right side of the x-forwarded-for HTTP header to trust. + # num-trusted-hops: 0 + # Configure the port used to access the Envoy admin interface. + # admin-port: 9001 + # + # Configure an optional global rate limit service. + # rateLimitService: + # Identifies the extension service defining the rate limit service, + # formatted as /. + # extensionService: projectcontour/ratelimit + # Defines the rate limit domain to pass to the rate limit service. + # domain: contour + # Defines whether to allow requests to proceed when the rate limit + # service fails to respond with a valid rate limit decision within + # the timeout defined on the extension service. + # failOpen: false + # Defines whether to include the X-RateLimit headers X-RateLimit-Limit, + # X-RateLimit-Remaining, and X-RateLimit-Reset (as defined by the IETF + # Internet-Draft linked below), on responses to clients when the Rate + # Limit Service is consulted for a request. + # ref. https://tools.ietf.org/id/draft-polli-ratelimit-headers-00.html + # enableXRateLimitHeaders: false + # + # Global Policy settings. + # policy: + # # Default headers to set on all requests (unless set/removed on the HTTPProxy object, or overridden + # # by the Route or Service). + # request-headers: + # set: + # # example: the hostname of the Envoy instance that processed the request + # X-Envoy-Hostname: %HOSTNAME% + # # example: add a l5d-dst-override header to instruct Linkerd what service the request is destined for + # l5d-dst-override: %CONTOUR_SERVICE_NAME%.%CONTOUR_NAMESPACE%.svc.cluster.local:%CONTOUR_SERVICE_PORT% + # remove: + # - X-User-Secret-Header + # # default headers to set on all responses (unless set/removed on the HTTPProxy object, or overridden + # # by the Route or Service). + # response-headers: + # set: + # # example: Envoy flags that provide additional details about the response or connection + # X-Envoy-Response-Flags: %RESPONSE_FLAGS% + # remove: + # - X-Internal-Secret-Header + # + # metrics: + # contour: + # address: 0.0.0.0 + # port: 8000 + # envoy: + # address: 0.0.0.0 + # port: 8002 + # + # listener: + # connection-balancer: exact + # socket-options: + # tos: 0 + # traffic-class: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: contour + name: contour + namespace: projectcontour +spec: + replicas: 1 + strategy: + type: RollingUpdate + rollingUpdate: + # This value of maxSurge means that during a rolling update + # the new ReplicaSet will be created first. 
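+      # With replicas: 1 this surges one extra pod (percentages round up), so
+      # the replacement Contour pod is Ready before the old one is removed.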
+ maxSurge: 50% + selector: + matchLabels: + app: contour + template: + metadata: + labels: + app: contour + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app: contour + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - args: + - serve + - --incluster + - --xds-address=0.0.0.0 + - --xds-port=8001 + - --contour-cafile=/certs/ca.crt + - --contour-cert-file=/certs/tls.crt + - --contour-key-file=/certs/tls.key + - --config-path=/config/contour.yaml + command: ["contour"] + image: ghcr.io/projectcontour/contour:v1.26.1 + imagePullPolicy: IfNotPresent + name: contour + ports: + - containerPort: 8001 + name: xds + protocol: TCP + - containerPort: 8000 + name: metrics + protocol: TCP + - containerPort: 6060 + name: debug + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: 8000 + readinessProbe: + tcpSocket: + port: 8001 + initialDelaySeconds: 15 + periodSeconds: 10 + volumeMounts: + - name: contourcert + mountPath: /certs + readOnly: true + - name: contour-config + mountPath: /config + readOnly: true + env: + - name: CONTOUR_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + dnsPolicy: ClusterFirst + serviceAccountName: contour + terminationGracePeriodSeconds: 30 + volumes: + - name: contourcert + secret: + secretName: contourcert + - name: contour-config + configMap: + name: contour + defaultMode: 0644 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: envoy + name: envoy + namespace: projectcontour +spec: + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 10% + selector: + matchLabels: + app: envoy + template: + metadata: + labels: + app: envoy + spec: + containers: + - command: + - /bin/contour + args: + - envoy + - shutdown-manager + image: ghcr.io/projectcontour/contour:v1.26.1 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/contour + - envoy + - shutdown + livenessProbe: + httpGet: + path: /healthz + port: 8090 + initialDelaySeconds: 3 + periodSeconds: 10 + name: shutdown-manager + ports: + - containerPort: 8090 + name: metrics + protocol: TCP + resources: {} + volumeMounts: + - name: envoy-admin + mountPath: /admin + - args: + - -c + - /config/envoy.json + - --service-cluster $(CONTOUR_NAMESPACE) + - --service-node $(ENVOY_POD_NAME) + - --log-level info + command: + - envoy + image: docker.io/envoyproxy/envoy:v1.27.2 + imagePullPolicy: IfNotPresent + name: envoy + env: + - name: CONTOUR_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: ENVOY_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + ports: + - containerPort: 8080 + hostPort: 80 + name: http + protocol: TCP + - containerPort: 8443 + hostPort: 443 + name: https + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8002 + initialDelaySeconds: 3 + periodSeconds: 4 + volumeMounts: + - name: envoy-config + mountPath: /config + readOnly: true + - name: envoycert + mountPath: /certs + readOnly: true + - name: envoy-admin + mountPath: /admin + lifecycle: + preStop: + httpGet: + path: /shutdown + port: 8090 + scheme: HTTP + initContainers: + - args: + - bootstrap + - /config/envoy.json + - --xds-address=contour + - --xds-port=8001 + - --xds-resource-version=v3 + - --resources-dir=/config/resources + - 
--envoy-cafile=/certs/ca.crt + - --envoy-cert-file=/certs/tls.crt + - --envoy-key-file=/certs/tls.key + command: + - contour + image: ghcr.io/projectcontour/contour:v1.26.1 + imagePullPolicy: IfNotPresent + name: envoy-initconfig + volumeMounts: + - name: envoy-config + mountPath: /config + - name: envoycert + mountPath: /certs + readOnly: true + env: + - name: CONTOUR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + automountServiceAccountToken: false + serviceAccountName: envoy + terminationGracePeriodSeconds: 300 + volumes: + - name: envoy-config + emptyDir: {} + - name: envoycert + secret: + secretName: envoycert + - name: envoy-admin + emptyDir: {} + hostNetwork: false + dnsPolicy: ClusterFirst +--- +apiVersion: v1 +kind: Service +metadata: + name: contour + namespace: projectcontour +spec: + ports: + - port: 8001 + name: xds + protocol: TCP + targetPort: 8001 + selector: + app: contour + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: envoy + namespace: projectcontour +spec: + externalTrafficPolicy: Local + ports: + - port: 80 + name: http + protocol: TCP + targetPort: 8080 + - port: 443 + name: https + protocol: TCP + targetPort: 8443 + selector: + app: envoy + type: NodePort \ No newline at end of file diff --git a/kind/bookinfo/contour_ingress/contour-rbac.yaml b/kind/bookinfo/contour_ingress/contour-rbac.yaml new file mode 100644 index 0000000..8e1a8ed --- /dev/null +++ b/kind/bookinfo/contour_ingress/contour-rbac.yaml @@ -0,0 +1,172 @@ +# Contour RBAC and certificates +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: contour +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - create + - get + - update +- apiGroups: + - projectcontour.io + resources: + - httpproxies + - tlscertificatedelegations + - extensionservices + - contourconfigurations + verbs: + - get + - list + - watch +- apiGroups: + - projectcontour.io + resources: + - httpproxies/status + - extensionservices/status + - contourconfigurations/status + verbs: + - create + - get + - update +- apiGroups: + - networking.x-k8s.io + resources: + - gatewayclasses + - gateways + - httproutes + - referencepolicies + - tlsroutes + verbs: + - get + - list + - watch +- apiGroups: + - networking.x-k8s.io + resources: + - gatewayclasses/status + - gateways/status + - httproutes/status + - tlsroutes/status + verbs: + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: contour +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: contour +subjects: +- kind: ServiceAccount + name: contour + namespace: projectcontour +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: contour-leaderelection + namespace: projectcontour +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: contour-leaderelection + namespace: projectcontour +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: contour-leaderelection +subjects: +- kind: 
ServiceAccount + name: contour + namespace: projectcontour +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: contour-certgen + namespace: projectcontour +spec: + template: + metadata: + labels: + app: contour-certgen + spec: + containers: + - name: contour + image: ghcr.io/projectcontour/contour:v1.26.1 + imagePullPolicy: IfNotPresent + command: + - contour + - certgen + - --kube + - --incluster + - --overwrite + - --secrets-format=compact + - --namespace=$(CONTOUR_NAMESPACE) + env: + - name: CONTOUR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: certs + mountPath: /certs + volumes: + - name: certs + emptyDir: {} + restartPolicy: Never + serviceAccountName: contour + parallelism: 1 + completions: 1 + backoffLimit: 1 \ No newline at end of file diff --git a/kind/bookinfo/contour_ingress/productpage.yaml b/kind/bookinfo/contour_ingress/productpage.yaml new file mode 100644 index 0000000..652129d --- /dev/null +++ b/kind/bookinfo/contour_ingress/productpage.yaml @@ -0,0 +1,73 @@ +################################################################################################## +# Productpage service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: productpage + labels: + app: productpage + service: productpage +spec: + type: ClusterIP + ports: + - port: 9080 + name: http + selector: + app: productpage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-productpage + labels: + account: productpage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: productpage-v1 + labels: + app: productpage + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: productpage + version: v1 + template: + metadata: + labels: + app: productpage + version: v1 + spec: + serviceAccountName: bookinfo-productpage + containers: + - name: productpage + image: docker.io/istio/examples-bookinfo-productpage-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + securityContext: + runAsUser: 1000 + env: + - name: REVIEWS_HOSTNAME + value: reviews.bookinfo.svc.slice.local + - name: DETAILS_HOSTNAME + value: details.bookinfo.svc.slice.local + - name: netshoot + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true + volumes: + - name: tmp + emptyDir: {} \ No newline at end of file diff --git a/kind/bookinfo/contour_ingress/serviceexports.yaml b/kind/bookinfo/contour_ingress/serviceexports.yaml new file mode 100644 index 0000000..abe7555 --- /dev/null +++ b/kind/bookinfo/contour_ingress/serviceexports.yaml @@ -0,0 +1,44 @@ +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: details + namespace: bookinfo +spec: + slice: bookinfo-slice + selector: + matchLabels: + app: details + ports: + - name: http + protocol: TCP + containerPort: 9080 +--- +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: reviews + namespace: bookinfo +spec: + slice: bookinfo-slice + selector: + matchLabels: + app: reviews + ports: + - name: http + protocol: TCP + containerPort: 9080 +--- +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: ratings + namespace: bookinfo +spec: + slice: bookinfo-slice + selector: + matchLabels: + app: ratings + ports: + - 
name: http + protocol: TCP + containerPort: 9080 \ No newline at end of file diff --git a/kind/bookinfo/contour_ingress/slice.yaml b/kind/bookinfo/contour_ingress/slice.yaml new file mode 100644 index 0000000..4cc2dd9 --- /dev/null +++ b/kind/bookinfo/contour_ingress/slice.yaml @@ -0,0 +1,35 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: bookinfo-slice + namespace: kubeslice-demo +spec: + sliceSubnet: 10.1.0.0/16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - ks-w-1 + - ks-w-2 + qosProfileDetails: + queueType: HTB + priority: 0 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5000 + bandwidthGuaranteedKbps: 1000 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: bookinfo + clusters: + - '*' + isolationEnabled: false + allowedNamespaces: + - namespace: kube-system + clusters: + - '*' + - namespace: projectcontour + clusters: + - '*' \ No newline at end of file From eba971e3ed19c84117cc684a10d6533b6f6787f9 Mon Sep 17 00:00:00 2001 From: sanjay7178 Date: Fri, 8 Aug 2025 23:56:32 +0530 Subject: [PATCH 2/3] refactor(contour-install): Remove redundant namespace and service account definitions Signed-off-by: sanjay7178 --- .../contour_ingress/contour-install.yaml | 172 ------------------ 1 file changed, 172 deletions(-) diff --git a/kind/bookinfo/contour_ingress/contour-install.yaml b/kind/bookinfo/contour_ingress/contour-install.yaml index 8ee5a21..21958c1 100644 --- a/kind/bookinfo/contour_ingress/contour-install.yaml +++ b/kind/bookinfo/contour_ingress/contour-install.yaml @@ -1,177 +1,5 @@ # Contour installation manifest # This installs Contour ingress controller -apiVersion: v1 -kind: Namespace -metadata: - name: projectcontour ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: contour - namespace: projectcontour ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: envoy - namespace: projectcontour ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: contour - namespace: projectcontour -data: - contour.yaml: | - # - # server: - # determine which XDS Server implementation to utilize in Contour. - # xds-server-type: contour - # - # Specify the Gateway API configuration. - # gateway: - # controllerName: projectcontour.io/gateway-controller - # - # should contour expect to be running inside a k8s cluster - # incluster: true - # - # path to kubeconfig (if not running inside a k8s cluster) - # kubeconfig: /path/to/cluster/admin/kubeconfig - # - # Client request timeout to be passed to Envoy - # as the connection manager request_timeout. - # Defaults to 0, which Envoy interprets as disabled. - # Note that this is the timeout for the whole request, - # not an idle timeout. - # request-timeout: 10s - # - # disable ingressroute permitInsecure field - disablePermitInsecure: false - tls: - # minimum TLS version that Contour will negotiate - # minimum-protocol-version: "1.2" - # The following config shows the defaults for the leader election. - # leaderelection: - # configmap-name: leader-elect - # configmap-namespace: projectcontour - ### Logging options - # Default setting - accesslog-format: envoy - # To enable JSON logging in Envoy - # accesslog-format: json - # The default fields that will be logged are specified below. - # To customise this list, just add or remove entries. 
- # The canonical list is available at - # https://godoc.org/github.com/projectcontour/contour/internal/envoy#JSONFields - # json-fields: - # - "@timestamp" - # - "authority" - # - "bytes_received" - # - "bytes_sent" - # - "downstream_local_address" - # - "downstream_remote_address" - # - "duration" - # - "method" - # - "path" - # - "protocol" - # - "request_id" - # - "requested_server_name" - # - "response_code" - # - "response_flags" - # - "uber_trace_id" - # - "upstream_cluster" - # - "upstream_host" - # - "upstream_local_address" - # - "upstream_service_time" - # - "user_agent" - # - "x_forwarded_for" - # - # default-http-versions: - # This defines the default set of HTTPS versions the proxy should accept. - # HTTP versions are strings of the form "HTTP/xx". - # Supported versions are "HTTP/1.1" and "HTTP/2". - # Values are stored in the order they are provided. - # Note: This only applies to secured requests. - # - "HTTP/2" - # - "HTTP/1.1" - # - # The following shows the default proxy timeout settings. - # timeouts: - # request-timeout: infinity - # connection-idle-timeout: 60s - # stream-idle-timeout: 5m - # max-connection-duration: infinity - # delayclose-timeout: 60s - # connection-shutdown-grace-period: 5s - # - # Envoy cluster settings. - # cluster: - # configure the cluster dns lookup family - # valid options are: auto (default), v4, v6 - # dns-lookup-family: auto - # - # Envoy network settings. - # network: - # Configure the number of additional ingress proxy hops from the - # right side of the x-forwarded-for HTTP header to trust. - # num-trusted-hops: 0 - # Configure the port used to access the Envoy admin interface. - # admin-port: 9001 - # - # Configure an optional global rate limit service. - # rateLimitService: - # Identifies the extension service defining the rate limit service, - # formatted as /. - # extensionService: projectcontour/ratelimit - # Defines the rate limit domain to pass to the rate limit service. - # domain: contour - # Defines whether to allow requests to proceed when the rate limit - # service fails to respond with a valid rate limit decision within - # the timeout defined on the extension service. - # failOpen: false - # Defines whether to include the X-RateLimit headers X-RateLimit-Limit, - # X-RateLimit-Remaining, and X-RateLimit-Reset (as defined by the IETF - # Internet-Draft linked below), on responses to clients when the Rate - # Limit Service is consulted for a request. - # ref. https://tools.ietf.org/id/draft-polli-ratelimit-headers-00.html - # enableXRateLimitHeaders: false - # - # Global Policy settings. - # policy: - # # Default headers to set on all requests (unless set/removed on the HTTPProxy object, or overridden - # # by the Route or Service). - # request-headers: - # set: - # # example: the hostname of the Envoy instance that processed the request - # X-Envoy-Hostname: %HOSTNAME% - # # example: add a l5d-dst-override header to instruct Linkerd what service the request is destined for - # l5d-dst-override: %CONTOUR_SERVICE_NAME%.%CONTOUR_NAMESPACE%.svc.cluster.local:%CONTOUR_SERVICE_PORT% - # remove: - # - X-User-Secret-Header - # # default headers to set on all responses (unless set/removed on the HTTPProxy object, or overridden - # # by the Route or Service). 
- # response-headers: - # set: - # # example: Envoy flags that provide additional details about the response or connection - # X-Envoy-Response-Flags: %RESPONSE_FLAGS% - # remove: - # - X-Internal-Secret-Header - # - # metrics: - # contour: - # address: 0.0.0.0 - # port: 8000 - # envoy: - # address: 0.0.0.0 - # port: 8002 - # - # listener: - # connection-balancer: exact - # socket-options: - # tos: 0 - # traffic-class: 0 ---- apiVersion: apps/v1 kind: Deployment metadata: From dc35dd0554d158ccbfc7ce9bf5777686da50fa39 Mon Sep 17 00:00:00 2001 From: sanjay7178 Date: Sat, 9 Aug 2025 13:58:12 +0530 Subject: [PATCH 3/3] refactor(readme): Update installation steps and improve clarity for KubeSlice setup Signed-off-by: sanjay7178 --- kind/bookinfo/contour_ingress/README.md | 76 ++++++++++++++++++------- 1 file changed, 54 insertions(+), 22 deletions(-) diff --git a/kind/bookinfo/contour_ingress/README.md b/kind/bookinfo/contour_ingress/README.md index 50b4f5c..839f2d2 100644 --- a/kind/bookinfo/contour_ingress/README.md +++ b/kind/bookinfo/contour_ingress/README.md @@ -17,9 +17,9 @@ Internet -> Contour Envoy (NodePort) -> HTTPProxy -> ProductPage Service (Cluste - [Prerequisites](#prerequisites) - [Installation](#installation) - [1. Setup Environment Variables](#1-setup-environment-variables) - - [2. Product Cluster Setup](#2-product-cluster-setup) - - [3. Services Cluster Setup](#3-services-cluster-setup) - - [4. KubeSlice Controller Setup](#4-kubeslice-controller-setup) + - [2. KubeSlice Setup (Controller and Workers)](#2-kubeslice-setup-controller-and-workers) + - [3. Product Cluster Setup](#3-product-cluster-setup) + - [4. Services Cluster Setup](#4-services-cluster-setup) - [Testing the Application](#testing-the-application) - [Option 1: Access via Contour Ingress](#option-1-access-via-contour-ingress) - [Option 2: Port-Forward the Product Page](#option-2-port-forward-the-product-page) @@ -53,18 +53,51 @@ Internet -> Contour Envoy (NodePort) -> HTTPProxy -> ProductPage Service (Cluste ```bash # Define cluster contexts and namespace -export PRODUCT_CLUSTER="kind-ks-w-1" # Cluster for productpage and ingress -export CONTROLLER_CLUSTER="kind-ks-ctrl" # KubeSlice controller cluster -export SERVICES_CLUSTER="kind-ks-w-2" # Cluster for backend services -export BOOKINFO_NAMESPACE="bookinfo" # Namespace for bookinfo components +export PRODUCT_CLUSTER="kind-ks-w-1" +export CONTROLLER_CLUSTER="kind-ks-ctrl" +export SERVICES_CLUSTER="kind-ks-w-2" +export BOOKINFO_NAMESPACE="bookinfo" +export SLICE_NAME="bookinfo-slice" # KubeSlice slice name ``` -### 2. Product Cluster Setup +### 2. 
KubeSlice Setup (Controller and Workers)
+
+```bash
+# Verify KubeSlice controller and worker agents are installed
+kubectx $CONTROLLER_CLUSTER
+kubectl get pods -n kubeslice-controller --no-headers
+
+kubectx $PRODUCT_CLUSTER
+kubectl get pods -n kubeslice-system --no-headers
+
+kubectx $SERVICES_CLUSTER
+kubectl get pods -n kubeslice-system --no-headers
+
+# Apply the slice configuration on the controller
+kubectx $CONTROLLER_CLUSTER
+kubectl apply -f slice.yaml
+
+# Create and label the application namespace on both worker clusters so workloads join the slice
+kubectx $PRODUCT_CLUSTER
+kubectl create namespace $BOOKINFO_NAMESPACE || true
+kubectl label namespace $BOOKINFO_NAMESPACE kubeslice.io/slice=$SLICE_NAME --overwrite
+
+kubectx $SERVICES_CLUSTER
+kubectl create namespace $BOOKINFO_NAMESPACE || true
+kubectl label namespace $BOOKINFO_NAMESPACE kubeslice.io/slice=$SLICE_NAME --overwrite
+
+# Wait for slice gateways to be ready on both worker clusters
+kubectx $PRODUCT_CLUSTER
+kubectl get workerslicegateway -n kubeslice-system
+kubectx $SERVICES_CLUSTER
+kubectl get workerslicegateway -n kubeslice-system
+```
+
+### 3. Product Cluster Setup
 
 ```bash
-# Switch to product cluster and create namespace
+# Switch to product cluster (namespace already created and labeled in Step 2)
 kubectx $PRODUCT_CLUSTER
-kubectl create namespace $BOOKINFO_NAMESPACE
 
 # Install Contour ingress controller and its components
 kubectl apply -f https://projectcontour.io/quickstart/contour.yaml
 kubectl apply -f contour-install.yaml
 kubectl apply -f contour-rbac.yaml
 
 # Verify Contour is ready
 kubectl get pods -n projectcontour -o wide
 
-# Deploy productpage component
+# Deploy productpage component into the slice namespace
 kubectl apply -f productpage.yaml -n $BOOKINFO_NAMESPACE
 
 # Configure HTTP Proxy for ingress routing
 kubectl apply -f bookinfo-httproxy.yaml -n $BOOKINFO_NAMESPACE
 
 # Verify all pods are running
 kubectl get pods -n $BOOKINFO_NAMESPACE
 ```
 
-### 3. Services Cluster Setup
+### 4. Services Cluster Setup
 
 ```bash
-# Switch to services cluster and create namespace
+# Switch to services cluster (namespace already created and labeled in Step 2)
 kubectx $SERVICES_CLUSTER
-kubectl create namespace $BOOKINFO_NAMESPACE
 
 # Install backend services
 kubectl apply -f details.yaml -n $BOOKINFO_NAMESPACE
 kubectl apply -f ratings.yaml -n $BOOKINFO_NAMESPACE
 kubectl apply -f reviews.yaml -n $BOOKINFO_NAMESPACE
 
 # Create service exports for cross-cluster communication
 kubectl apply -f serviceexports.yaml -n $BOOKINFO_NAMESPACE
 
 # Verify service exports were created
 kubectl get serviceexport -n $BOOKINFO_NAMESPACE
 ```
 
-### 4. KubeSlice Controller Setup
-
-Apply the slice configuration
-```bash
-kubectx $CONTROLLER_CLUSTER
-kubectl apply -f slice.yaml
-```
-
 ## Testing the Application
 
 ### Option 1: Access via Contour Ingress
 
 Verify DNS configuration for cross-cluster services
 ```bash
 kubectl get configmap -n kubeslice-system kubeslice-dns -o yaml
 ```
+Verify namespace membership on the slice
+```bash
+kubectl get ns $BOOKINFO_NAMESPACE --show-labels
+```
 
 ## Cleanup
 
 ```bash
 # Clean up services cluster
 kubectx $SERVICES_CLUSTER
 kubectl delete namespace $BOOKINFO_NAMESPACE
 
 # Clean up product cluster
 kubectx $PRODUCT_CLUSTER
 kubectl delete namespace $BOOKINFO_NAMESPACE
 kubectl delete -f contour-install.yaml
 # If Contour was also installed from the quickstart manifest during setup,
 # remove its cluster-scoped resources (CRDs, cluster RBAC); deleting the
 # projectcontour namespace alone does not clean those up:
 kubectl delete -f https://projectcontour.io/quickstart/contour.yaml --ignore-not-found
 kubectl delete -f contour-rbac.yaml
 kubectl delete namespace projectcontour
+
+# Remove slice configuration
+kubectx $CONTROLLER_CLUSTER
+kubectl delete -f slice.yaml
 ```
\ No newline at end of file