diff --git a/.gitignore b/.gitignore
index 37d3b44f..8c89176a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,9 +13,12 @@
 .idea/
 .vscode/
 *.swp
+*.swo
+*~
 *.bak
 *.tmp
 *.temp
+.tmp/
 
 # System-specific ignores
 .DS_Store
@@ -56,4 +59,8 @@
 applications/wg-easy/release/
 .specstory/.what-is-this.md
 *.tar.gz
+# Flipt specific
+applications/flipt/release/
+applications/flipt/chart/Chart.lock
+
 **/.claude/settings.local.json
diff --git a/applications/flipt/Makefile b/applications/flipt/Makefile
new file mode 100644
index 00000000..c844ebc4
--- /dev/null
+++ b/applications/flipt/Makefile
@@ -0,0 +1,180 @@
+# Makefile for building, releasing, and operating the Flipt Helm chart.
+# Targets are self-documenting via the trailing "## ..." comments (see `make help`).
+
+.PHONY: help lint package update-deps install install-no-operator uninstall upgrade template test status values manifest logs clean clean-install port-forward replicated-lint replicated-create preflight support-bundle check-deps release dev-install dev-upgrade watch-pods
+
+CHART_DIR := chart
+CHART_NAME := flipt
+NAMESPACE := flipt
+RELEASE_NAME := flipt
+RELEASE_DIR := release
+REPLICATED_DIR := replicated
+CHART_VERSION := $(shell grep '^version:' $(CHART_DIR)/Chart.yaml | awk '{print $$2}')
+
+help: ## Display this help message
+	@echo "Flipt Helm Chart Management"
+	@echo ""
+	@echo "Available targets:"
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[36m%-20s\033[0m %s\n", $$1, $$2}'
+
+lint: ## Lint the Helm chart
+	@echo "Linting Helm chart..."
+	helm lint $(CHART_DIR)
+
+package: lint ## Package the Helm chart
+	@echo "Packaging Helm chart..."
+	@mkdir -p $(RELEASE_DIR)
+	helm package $(CHART_DIR) -d $(RELEASE_DIR)
+
+update-deps: ## Update Helm chart dependencies
+	@echo "Adding Helm repositories..."
+	helm repo add flipt https://helm.flipt.io || true
+	helm repo add bitnami https://charts.bitnami.com/bitnami || true
+	helm repo add cnpg https://cloudnative-pg.github.io/charts || true
+	helm repo add replicated https://charts.replicated.com || true
+	helm repo update
+	@echo "Updating chart dependencies..."
+	cd $(CHART_DIR) && helm dependency update
+
+install: ## Install the chart (operator included as dependency)
+	@echo "Installing $(CHART_NAME)..."
+	@echo "Note: This includes CloudNativePG operator installation, which may take a few minutes..."
+	helm install $(RELEASE_NAME) $(CHART_DIR) \
+		--namespace $(NAMESPACE) \
+		--create-namespace \
+		--wait \
+		--wait-for-jobs \
+		--timeout 15m
+
+install-no-operator: ## Install without CloudNativePG operator (use if already installed)
+	@echo "Installing $(CHART_NAME) without operator..."
+	helm install $(RELEASE_NAME) $(CHART_DIR) \
+		--namespace $(NAMESPACE) \
+		--create-namespace \
+		--set cloudnative-pg.enabled=false \
+		--wait \
+		--wait-for-jobs \
+		--timeout 10m
+
+uninstall: ## Uninstall the chart
+	@echo "Uninstalling $(CHART_NAME)..."
+	helm uninstall $(RELEASE_NAME) --namespace $(NAMESPACE)
+
+upgrade: ## Upgrade the chart
+	@echo "Upgrading $(CHART_NAME)..."
+	helm upgrade $(RELEASE_NAME) $(CHART_DIR) \
+		--namespace $(NAMESPACE) \
+		--wait \
+		--timeout 10m
+
+template: ## Render chart templates locally
+	@echo "Rendering templates..."
+	helm template $(RELEASE_NAME) $(CHART_DIR) \
+		--namespace $(NAMESPACE) \
+		--debug
+
+test: ## Run Helm tests
+	@echo "Running Helm tests..."
+	helm test $(RELEASE_NAME) --namespace $(NAMESPACE)
+
+status: ## Show release status
+	@echo "Release status:"
+	helm status $(RELEASE_NAME) --namespace $(NAMESPACE)
+
+values: ## Show computed values
+	@echo "Computed values:"
+	helm get values $(RELEASE_NAME) --namespace $(NAMESPACE)
+
+manifest: ## Show deployed manifest
+	@echo "Deployed manifest:"
+	helm get manifest $(RELEASE_NAME) --namespace $(NAMESPACE)
+
+logs: ## Tail Flipt logs
+	@echo "Tailing Flipt logs..."
+	kubectl logs -l app.kubernetes.io/name=flipt -n $(NAMESPACE) --tail=100 -f
+
+clean: ## Clean generated files
+	@echo "Cleaning generated files..."
+	rm -rf $(CHART_DIR)/charts/*.tgz
+	rm -rf $(CHART_DIR)/Chart.lock
+	rm -rf $(RELEASE_DIR)/*.tgz
+
+clean-install: clean update-deps install ## Clean rebuild and install
+
+port-forward: ## Port forward to Flipt service
+	@echo "Port forwarding to Flipt (http://localhost:8080)..."
+	kubectl port-forward -n $(NAMESPACE) svc/$(RELEASE_NAME)-flipt 8080:8080
+
+replicated-lint: ## Lint Replicated KOTS configs
+	@echo "Linting KOTS configuration..."
+	replicated release lint --yaml-dir $(REPLICATED_DIR)/
+
+replicated-create: package ## Create a new Replicated release (includes packaging)
+	@echo "Creating Replicated release..."
+	replicated release create --auto --yaml-dir $(REPLICATED_DIR)/
+
+preflight: ## Run preflight checks
+	@echo "Running preflight checks..."
+	kubectl preflight $(CHART_DIR)/templates/secret-preflights.yaml
+
+support-bundle: ## Generate support bundle
+	@echo "Generating support bundle..."
+	kubectl support-bundle $(CHART_DIR)/templates/secret-supportbundle.yaml
+
+check-deps: ## Verify all required tools are installed
+	@echo "Checking dependencies..."
+	@command -v helm >/dev/null 2>&1 || { echo "helm is not installed"; exit 1; }
+	@command -v kubectl >/dev/null 2>&1 || { echo "kubectl is not installed"; exit 1; }
+	@echo "All dependencies satisfied!"
+
+release: package ## Lint, package, update Replicated config, and create release
+	@# Clean previous chart archives from replicated dir
+	@rm -f $(REPLICATED_DIR)/$(CHART_NAME)-*.tgz
+	@# Copy packaged chart to replicated dir
+	@cp $(RELEASE_DIR)/$(CHART_NAME)-$(CHART_VERSION).tgz $(REPLICATED_DIR)/
+	@# Update chartVersion in HelmChart config
+	@# (portable in-place edit: BSD and GNU sed disagree on `sed -i`, so write a temp file)
+	@sed 's/chartVersion:.*/chartVersion: $(CHART_VERSION)/' $(REPLICATED_DIR)/kots-helm-chart.yaml > $(REPLICATED_DIR)/kots-helm-chart.yaml.tmp && mv $(REPLICATED_DIR)/kots-helm-chart.yaml.tmp $(REPLICATED_DIR)/kots-helm-chart.yaml
+	@echo "Chart $(CHART_NAME)-$(CHART_VERSION) packaged and staged in $(REPLICATED_DIR)/"
+	@echo ""
+	@# Lint Replicated KOTS configs
+	@echo "Linting KOTS configuration..."
+	replicated release lint --yaml-dir $(REPLICATED_DIR)/
+	@echo ""
+	@# Determine next version by bumping previous release patch version
+	@PREV_VERSION=$$(replicated release ls 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1); \
+	if [ -n "$$PREV_VERSION" ]; then \
+		MAJOR=$$(echo $$PREV_VERSION | cut -d. -f1); \
+		MINOR=$$(echo $$PREV_VERSION | cut -d. -f2); \
+		PATCH=$$(echo $$PREV_VERSION | cut -d. -f3); \
+		NEXT_VERSION="$$MAJOR.$$MINOR.$$((PATCH + 1))"; \
+		echo "Previous release: $$PREV_VERSION"; \
+	else \
+		NEXT_VERSION="$(CHART_VERSION)"; \
+		echo "No previous release found, defaulting to chart version"; \
+	fi; \
+	echo "Suggested version: $$NEXT_VERSION"; \
+	echo ""; \
+	read -p "Release version [$$NEXT_VERSION]: " INPUT_VERSION; \
+	VERSION=$${INPUT_VERSION:-$$NEXT_VERSION}; \
+	echo ""; \
+	read -p "Release notes: " RELEASE_NOTES; \
+	if [ -z "$$RELEASE_NOTES" ]; then \
+		echo "Error: release notes are required"; \
+		exit 1; \
+	fi; \
+	echo ""; \
+	echo "Creating Replicated release v$$VERSION..."; \
+	replicated release create \
+		--promote Unstable \
+		--yaml-dir $(REPLICATED_DIR) \
+		--version "$$VERSION" \
+		--release-notes "$$RELEASE_NOTES"
+
+# Development helpers
+dev-install: update-deps install ## Update dependencies and install
+
+dev-upgrade: update-deps upgrade ## Update dependencies and upgrade
+
+watch-pods: ## Watch pod status
+	kubectl get pods -n $(NAMESPACE) -w
diff --git a/applications/flipt/QUICKSTART.md b/applications/flipt/QUICKSTART.md
new file mode 100644
index 00000000..9aa14bcb
--- /dev/null
+++ b/applications/flipt/QUICKSTART.md
@@ -0,0 +1,279 @@
+# Flipt Quick Start Guide
+
+Get up and running with Flipt in 5 minutes.
+
+## Prerequisites
+
+- Kubernetes cluster (1.24+)
+- Helm 3.8+
+- kubectl configured
+
+## ⚠️ Replicated License Required
+
+Before you begin, you need a **Replicated development license**:
+
+```bash
+# 1. 
Set your Replicated API token
+export REPLICATED_API_TOKEN=your-token-here
+```
+
+**Don't have a Replicated account?**
+- Sign up at [vendor.replicated.com](https://vendor.replicated.com)
+- See [Development License Guide](docs/DEVELOPMENT_LICENSE.md) for detailed instructions
+
+## Option 1: Quick Install (Development)
+
+Install with default settings for testing:
+
+### Easy Install (Recommended)
+
+Use the automated installation script:
+
+```bash
+./scripts/install.sh
+```
+
+This script will:
+- ✅ Check prerequisites (kubectl, helm)
+- ✅ Install CloudNativePG operator (if not present)
+- ✅ Add all required Helm repositories
+- ✅ Clean and rebuild dependencies
+- ✅ Install Flipt with all components
+- ✅ Show status and next steps
+
+### Manual Install
+
+If you prefer to run commands manually:
+
+```bash
+# Step 1: Update chart dependencies (includes CloudNativePG operator)
+cd chart
+rm -f Chart.lock  # Clean cached files
+helm repo add flipt https://helm.flipt.io
+helm repo add valkey https://valkey.io/valkey-helm/
+helm repo add cnpg https://cloudnative-pg.github.io/charts
+helm repo add replicated https://charts.replicated.com
+helm repo update
+helm dependency update
+cd ..
+
+# Step 2: Install Flipt (operator included automatically)
+helm install flipt ./chart \
+  --namespace flipt \
+  --create-namespace \
+  --wait \
+  --timeout 10m
+
+# Step 3: Port forward to access
+kubectl port-forward -n flipt svc/flipt-flipt 8080:8080
+```
+
+Open your browser to: **http://localhost:8080**
+
+## Option 2: Replicated KOTS Install
+
+For enterprise deployments with admin console:
+
+1. **Upload the application** to your Replicated vendor portal:
+   ```bash
+   replicated release create --auto --yaml-dir replicated/
+   ```
+
+2. **Install via Replicated Admin Console**:
+   - Log into your Replicated admin console
+   - Select the Flipt application
+   - Follow the configuration wizard
+   - Deploy
+
+3. 
**Access Flipt** through configured ingress or LoadBalancer + +## Option 3: Production Install + +For production with HA: + +```bash +helm install flipt ./chart \ + --namespace flipt \ + --create-namespace \ + --values examples/kubernetes/values-production.yaml \ + --wait +``` + +Access via your configured ingress hostname. + +## Your First Feature Flag + +### 1. Access the UI + +Navigate to the Flipt UI (http://localhost:8080 if using port-forward). + +### 2. Create a Flag + +1. Click **"Flags"** in the sidebar +2. Click **"Create Flag"** +3. Fill in: + - **Name**: `new_dashboard` + - **Description**: `Enable the new dashboard UI` + - **Type**: Boolean +4. Click **"Create"** + +### 3. Enable the Flag + +1. Toggle the flag to **Enabled** +2. Set a percentage rollout (e.g., 50%) +3. Click **"Save"** + +### 4. Use the Flag in Your App + +**Node.js:** +```javascript +const { FliptClient } = require('@flipt-io/flipt'); + +const flipt = new FliptClient({ url: 'http://localhost:8080' }); + +const result = await flipt.evaluateBoolean({ + namespaceKey: 'default', + flagKey: 'new_dashboard', + entityId: 'user-123', + context: {} +}); + +if (result.enabled) { + console.log('Show new dashboard!'); +} +``` + +**Go:** +```go +import flipt "go.flipt.io/flipt/rpc/flipt" + +client := flipt.NewFliptClient(conn) + +resp, _ := client.EvaluateBoolean(ctx, &flipt.EvaluationRequest{ + NamespaceKey: "default", + FlagKey: "new_dashboard", + EntityId: "user-123", +}) + +if resp.Enabled { + fmt.Println("Show new dashboard!") +} +``` + +**Python:** +```python +from flipt import FliptClient + +client = FliptClient(url="http://localhost:8080") + +result = client.evaluate_boolean( + namespace_key="default", + flag_key="new_dashboard", + entity_id="user-123" +) + +if result.enabled: + print("Show new dashboard!") +``` + +## Verify Installation + +Check that all components are running: + +```bash +# Check pods +kubectl get pods -n flipt + +# Should see: +# - flipt-flipt-xxx (2 replicas) +# - 
flipt-cluster-xxx (PostgreSQL) +# - flipt-valkey-xxx + +# Check services +kubectl get svc -n flipt + +# Check ingress (if enabled) +kubectl get ingress -n flipt +``` + +## Common Commands + +```bash +# View logs +kubectl logs -l app.kubernetes.io/name=flipt -n flipt --tail=100 -f + +# Restart Flipt +kubectl rollout restart deployment/flipt-flipt -n flipt + +# Scale Flipt +kubectl scale deployment/flipt-flipt -n flipt --replicas=3 + +# Check database status +kubectl get cluster -n flipt + +# Check Valkey status +kubectl get pods -l app.kubernetes.io/name=valkey -n flipt +``` + +## Troubleshooting + +### Pods Not Starting + +```bash +kubectl describe pod -n flipt +kubectl logs -n flipt +``` + +### Can't Access UI + +```bash +# Verify service is running +kubectl get svc flipt-flipt -n flipt + +# Check if port-forward is working +kubectl port-forward -n flipt svc/flipt-flipt 8080:8080 + +# Test locally +curl http://localhost:8080/health +``` + +### Database Connection Issues + +```bash +# Check PostgreSQL cluster +kubectl get cluster -n flipt + +# Check PostgreSQL logs +kubectl logs -l cnpg.io/cluster=flipt-cluster -n flipt +``` + +## Next Steps + +1. **Set up ingress** for external access +2. **Configure authentication** for API security +3. **Enable metrics** for monitoring +4. **Create targeting rules** for user segmentation +5. 
**Integrate SDKs** into your applications + +## Resources + +- 📖 [Full Documentation](../README.md) +- 💻 [SDK Examples](examples/sdk/) +- ⚙️ [Configuration Examples](examples/kubernetes/) +- 🆘 [Troubleshooting Guide](../README.md#troubleshooting) + +## Uninstall + +```bash +# Uninstall Flipt +helm uninstall flipt --namespace flipt + +# Remove namespace and PVCs +kubectl delete namespace flipt +``` + +## Support + +- **Flipt Issues**: https://github.com/flipt-io/flipt/issues +- **Helm Chart Issues**: https://github.com/flipt-io/helm-charts/issues +- **Replicated Support**: https://support.replicated.com diff --git a/applications/flipt/README.md b/applications/flipt/README.md new file mode 100644 index 00000000..de5268a3 --- /dev/null +++ b/applications/flipt/README.md @@ -0,0 +1,606 @@ +# Flipt Feature Flags + +Enterprise-ready deployment of [Flipt](https://flipt.io), an open-source, self-hosted feature flag and experimentation platform, integrated with Replicated for streamlined Kubernetes deployment. 
+ +## Overview + +Flipt enables teams to: + +- **Deploy features gradually** with percentage-based rollouts +- **Target specific users** with advanced segmentation rules +- **Run A/B tests** and experiments safely +- **Manage feature flags** across multiple environments +- **Reduce deployment risk** with instant kill switches + +This Helm chart provides a production-ready deployment with: + +- ✅ PostgreSQL database (embedded via CloudnativePG or external) +- ✅ Valkey distributed caching for high performance +- ✅ Horizontal pod autoscaling support +- ✅ TLS/ingress configuration +- ✅ Replicated SDK integration for enterprise management +- ✅ Comprehensive monitoring and metrics +- ✅ Support bundle generation for troubleshooting + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Load Balancer │ +│ (Ingress) │ +└─────────────────┬───────────────────────────────────────────┘ + │ + ┌─────────▼─────────┐ + │ Flipt Service │ + │ (2+ replicas) │ + │ HTTP + gRPC │ + └──────┬────────┬───┘ + │ │ + ┌─────────▼──┐ ┌─▼──────────┐ + │ PostgreSQL │ │ Valkey │ + │ (CNPG) │ │ (Cache) │ + └────────────┘ └────────────┘ +``` + +### Components + +1. **Flipt Server**: Core application handling feature flag evaluation and management +2. **PostgreSQL**: Durable storage for feature flag definitions and metadata (CloudnativePG operator) +3. **Valkey**: Distributed cache for high-performance flag evaluation (required for multiple replicas) +4. **Ingress**: External access with TLS support + +## Prerequisites + +- Kubernetes 1.24.0+ +- Helm 3.8+ +- Minimum resources: + - 2 CPU cores + - 4GB RAM + - Default storage class with RWO support +- **CloudNativePG operator** (for embedded PostgreSQL) + - Install once per cluster (see installation instructions below) +- (Optional) Ingress controller (NGINX, Traefik, etc.) +- (Optional) cert-manager for automated TLS certificates + +## Installation + +### Using Replicated Admin Console (KOTS) + +1. 
**Install the application** through the Replicated admin console +2. **Configure settings** in the admin console UI: + - Ingress and TLS settings + - Database configuration (embedded or external) + - Valkey cache settings + - Resource limits +3. **Deploy** and monitor via the admin console + +The admin console provides: +- One-click deployment +- Configuration validation +- Preflight checks +- Automated updates +- Support bundle generation + +### Using Helm Directly + +**✨ Note:** The CloudNativePG operator is now included as a chart dependency and will be installed automatically. + +### Important: Replicated License Required + +Flipt requires a Replicated development license for local testing. This provides access to: +- Replicated SDK integration +- Admin console features +- Preflight checks +- Support bundle generation + +**Quick Setup:** +```bash +# 1. Set up development license +export REPLICATED_API_TOKEN=your-token +export REPLICATED_LICENSE_ID=your-license-id +``` + +**Detailed instructions:** See [Development License Guide](docs/DEVELOPMENT_LICENSE.md) + +1. **Add the Helm repositories:** + + ```bash + helm repo add flipt-repo https://helm.flipt.io + helm repo add replicated https://charts.replicated.com + helm repo update + ``` + +3. **Install the chart:** + + ```bash + cd chart + helm dependency update + cd .. + + helm install flipt ./chart \ + --namespace flipt \ + --create-namespace \ + --values custom-values.yaml \ + --timeout 10m + ``` + +4. 
**Wait for deployment:** + + ```bash + kubectl wait --for=condition=ready pod \ + -l app.kubernetes.io/name=flipt \ + -n flipt \ + --timeout=5m + ``` + +## Configuration + +### Key Configuration Options + +The chart can be configured via `values.yaml` or the Replicated admin console: + +#### Flipt Application + +```yaml +flipt: + replicaCount: 2 # Number of Flipt pods (2+ recommended with Valkey) + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi +``` + +#### PostgreSQL Database + +```yaml +postgresql: + type: embedded # 'embedded' or 'external' + + # Embedded database (CloudnativePG) + embedded: + enabled: true + cluster: + instances: 1 # 3 for HA + storage: + size: 10Gi + storageClass: "" +``` + +#### Valkey Cache + +```yaml +valkey: + enabled: true # Required for multiple Flipt replicas + image: + repository: ghcr.io/valkey-io/valkey + tag: "8.0" + # Uses emptyDir by default (cache data is ephemeral) +``` + +#### Ingress + +```yaml +flipt: + ingress: + enabled: true + className: nginx + hosts: + - host: flipt.example.com + paths: + - path: / + pathType: Prefix + tls: + - secretName: flipt-tls + hosts: + - flipt.example.com +``` + +## Accessing Flipt + +### Via Ingress + +If ingress is enabled, access Flipt at your configured hostname: + +``` +https://flipt.example.com +``` + +### Via Port Forward + +For local access without ingress: + +```bash +kubectl port-forward -n flipt svc/flipt-flipt 8080:8080 +``` + +Then open: http://localhost:8080 + +### Via LoadBalancer + +Change the service type to LoadBalancer: + +```yaml +flipt: + service: + type: LoadBalancer +``` + +## Using Flipt + +### 1. Create Your First Feature Flag + +Navigate to the Flipt UI and: + +1. Create a new flag (e.g., `new_dashboard`) +2. Set the flag type (boolean, variant, etc.) +3. Configure targeting rules (optional) +4. Enable the flag + +### 2. 
Integrate with Your Application + +#### Node.js Example + +```javascript +const { FliptClient } = require('@flipt-io/flipt'); + +const client = new FliptClient({ + url: 'http://flipt.example.com', +}); + +// Evaluate a boolean flag +const result = await client.evaluateBoolean({ + namespaceKey: 'default', + flagKey: 'new_dashboard', + entityId: 'user-123', + context: { + email: 'user@example.com', + plan: 'enterprise' + } +}); + +if (result.enabled) { + // Show new dashboard +} +``` + +#### Go Example + +```go +import ( + "context" + flipt "go.flipt.io/flipt/rpc/flipt" + "google.golang.org/grpc" +) + +conn, _ := grpc.Dial("flipt.example.com:9000", grpc.WithInsecure()) +client := flipt.NewFliptClient(conn) + +resp, _ := client.EvaluateBoolean(context.Background(), &flipt.EvaluationRequest{ + NamespaceKey: "default", + FlagKey: "new_dashboard", + EntityId: "user-123", + Context: map[string]string{ + "email": "user@example.com", + "plan": "enterprise", + }, +}) + +if resp.Enabled { + // Show new dashboard +} +``` + +#### Python Example + +```python +from flipt import FliptClient + +client = FliptClient(url="http://flipt.example.com") + +result = client.evaluate_boolean( + namespace_key="default", + flag_key="new_dashboard", + entity_id="user-123", + context={ + "email": "user@example.com", + "plan": "enterprise" + } +) + +if result.enabled: + # Show new dashboard +``` + +### 3. 
Advanced Features + +#### Percentage Rollouts + +Gradually release features to a percentage of users: + +```yaml +Rules: + - Rollout: 25% # Start with 25% of users + Value: true +``` + +#### User Targeting + +Target specific user segments: + +```yaml +Rules: + - Segment: + Key: email + Constraint: ends_with + Value: "@enterprise.com" + Value: true +``` + +#### A/B Testing + +Create variant flags for experiments: + +```yaml +Variants: + - control: 50% + - treatment_a: 25% + - treatment_b: 25% +``` + +## Scaling & High Availability + +### Horizontal Scaling + +Enable autoscaling for automatic pod scaling: + +```yaml +flipt: + autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 +``` + +### Database HA + +For production, use 3 PostgreSQL instances: + +```yaml +postgresql: + embedded: + cluster: + instances: 3 +``` + +## Monitoring + +### Prometheus Metrics + +Enable metrics collection: + +```yaml +flipt: + serviceMonitor: + enabled: true +``` + +### Available Metrics + +Flipt exposes metrics at `/metrics`: + +- `flipt_evaluations_total` - Total number of flag evaluations +- `flipt_evaluation_duration_seconds` - Evaluation latency +- `flipt_cache_hits_total` - Cache hit count +- `flipt_cache_misses_total` - Cache miss count + +## Troubleshooting + +### Generate Support Bundle + +Via Replicated admin console: Navigate to Troubleshoot > Generate Support Bundle + +Via CLI: + +```bash +kubectl support-bundle ./replicated/kots-support-bundle.yaml +``` + +### Common Issues + +#### Pods Not Starting + +Check pod status and events: + +```bash +kubectl get pods -n flipt +kubectl describe pod -n flipt +``` + +#### Database Connection Issues + +Check PostgreSQL cluster status: + +```bash +kubectl get cluster -n flipt +kubectl logs -l cnpg.io/cluster=flipt-cluster -n flipt +``` + +#### Valkey Connection Issues + +Check Valkey status: + +```bash +kubectl get pods -l app.kubernetes.io/name=valkey -n flipt +kubectl logs -l 
app.kubernetes.io/name=valkey -n flipt +``` + +#### Cache Not Working + +Verify Valkey is enabled and Flipt can connect: + +```bash +kubectl exec -it deploy/flipt-flipt -n flipt -- sh +# Inside the pod: +nc -zv flipt-valkey 6379 +``` + +### Debug Logs + +Enable debug logging: + +```yaml +flipt: + config: + log: + level: debug +``` + +## Upgrading + +### Via Replicated Admin Console + +1. Navigate to Version History +2. Select the new version +3. Review changes +4. Deploy + +### Via Helm + +```bash +helm upgrade flipt ./chart \ + --namespace flipt \ + --values custom-values.yaml +``` + +## Uninstallation + +### Via Replicated Admin Console + +Navigate to application settings and select "Remove Application" + +### Via Helm + +```bash +helm uninstall flipt --namespace flipt +``` + +To also remove PVCs: + +```bash +kubectl delete pvc --all -n flipt +``` + +## Security Considerations + +1. **Enable TLS**: Always use TLS in production +2. **Authentication**: Configure authentication methods for the API +3. **Network Policies**: Restrict pod-to-pod communication +4. **Secrets Management**: Use external secret management for sensitive data +5. **RBAC**: Implement Kubernetes RBAC for admin access +6. 
**Regular Updates**: Keep Flipt and dependencies updated + +### Authentication Setup + +Flipt supports multiple authentication methods: + +```yaml +flipt: + config: + authentication: + methods: + token: + enabled: true + # Or use OIDC + oidc: + enabled: true + issuerURL: "https://accounts.google.com" + clientID: "your-client-id" + clientSecret: "your-client-secret" +``` + +## Performance Tuning + +### Database Optimization + +```yaml +postgresql: + embedded: + cluster: + resources: + limits: + cpu: 2000m + memory: 4Gi + postgresql: + parameters: + max_connections: "200" + shared_buffers: "1GB" +``` + +### Valkey Optimization + +```yaml +valkey: + resources: + limits: + memory: 2Gi +``` + +### Flipt Optimization + +```yaml +flipt: + config: + db: + maxOpenConn: 100 + maxIdleConn: 25 + connMaxLifetime: 1h + cache: + ttl: 10m # Increase cache TTL for more stable flags +``` + +## Resources + +- **Flipt Documentation**: https://docs.flipt.io +- **API Reference**: https://docs.flipt.io/reference/overview +- **SDKs**: https://docs.flipt.io/integration +- **GitHub**: https://github.com/flipt-io/flipt +- **Discord Community**: https://discord.gg/kRhEqG2T +- **Replicated Documentation**: https://docs.replicated.com + +## Support + +For issues with: +- **Flipt application**: https://github.com/flipt-io/flipt/issues +- **Helm chart/deployment**: https://github.com/flipt-io/helm-charts/issues +- **Replicated integration**: https://support.replicated.com + +## License + +- Flipt is licensed under GPL-3.0 +- This Helm chart follows the same GPL-3.0 license +- Replicated SDK has its own licensing terms + +## Contributing + +Contributions are welcome! Please: + +1. Fork the repository +2. Create a feature branch +3. Make your changes +4. 
Submit a pull request + +## Changelog + +### Version 1.0.0 + +- Initial release +- Flipt v1.61.0 +- PostgreSQL 16 via CloudnativePG +- Valkey 8.0 for distributed caching +- Replicated SDK integration +- Comprehensive KOTS configuration +- Preflight checks and support bundles diff --git a/applications/flipt/TROUBLESHOOTING.md b/applications/flipt/TROUBLESHOOTING.md new file mode 100644 index 00000000..a0a5fc51 --- /dev/null +++ b/applications/flipt/TROUBLESHOOTING.md @@ -0,0 +1,513 @@ +# Flipt Troubleshooting Guide + +Common issues and solutions for deploying Flipt. + +## Installation Issues + +### Error: "no matches for kind 'Cluster' in version 'postgresql.cnpg.io/v1'" + +**Full error:** +``` +Error: INSTALLATION FAILED: unable to build kubernetes objects from release manifest: +resource mapping not found for name: "flipt-cluster" namespace: "flipt" from "": +no matches for kind "Cluster" in version "postgresql.cnpg.io/v1" +ensure CRDs are installed first +``` + +**Cause:** The CloudNativePG operator chart dependency failed to install or CRDs are not present. + +**Solution:** The operator is now included as a chart dependency and should install automatically. If you see this error: + +1. **Check if operator is disabled:** + ```bash + # Ensure operator is enabled (default) + helm install flipt ./chart \ + --namespace flipt \ + --create-namespace \ + --set cloudnative-pg.enabled=true + ``` + +2. **Verify CRDs are installed:** + ```bash + kubectl get crd | grep postgresql.cnpg.io + + # Should show: + # backups.postgresql.cnpg.io + # clusters.postgresql.cnpg.io + # poolers.postgresql.cnpg.io + # scheduledbackups.postgresql.cnpg.io + ``` + +3. 
**Check operator pods:** + ```bash + kubectl get pods -n flipt + # Look for cloudnative-pg-* pods + ``` + +--- + +### Error: "nil pointer evaluating interface {}.enabled" + +**Full error:** +``` +Error: INSTALLATION FAILED: flipt/templates/postgresql-cluster.yaml:65:32 + executing "flipt/templates/postgresql-cluster.yaml" at + <.Values.postgresql.embedded.cluster.monitoring.enabled>: + nil pointer evaluating interface {}.enabled +``` + +**Cause:** Missing field in values.yaml (should be fixed in the latest version). + +**Solution:** Ensure you're using the latest chart or add to values.yaml: + +```yaml +postgresql: + embedded: + cluster: + monitoring: + enabled: false +``` + +--- + +### Error: Dependencies not found + +**Error:** +``` +Error: found in Chart.yaml, but missing in charts/ directory: flipt, valkey, replicated +``` + +**Solution:** Update Helm dependencies: + +```bash +cd chart +helm dependency update +cd .. +``` + +Or use the Makefile: +```bash +make update-deps +``` + +--- + +### Error: CRD ownership conflict with CloudNativePG + +**Full error:** +``` +Error: INSTALLATION FAILED: unable to continue with install: +CustomResourceDefinition "backups.postgresql.cnpg.io" in namespace "" exists +and cannot be imported into the current release: invalid ownership metadata; +annotation validation error: key "meta.helm.sh/release-name" must equal "flipt": +current value is "cnpg" +``` + +**Cause:** The CloudNativePG operator was previously installed separately at cluster level, and now it's also included as a chart dependency. + +**Solution:** If you already have the operator installed cluster-wide, disable it in the chart: + +```bash +# Install without operator dependency +helm install flipt ./chart \ + --namespace flipt \ + --create-namespace \ + --set cloudnative-pg.enabled=false + +# Or use the Makefile +make install-no-operator +``` + +**Note:** The CloudNativePG operator is now included as a chart dependency by default. 
If you prefer to manage it separately at the cluster level, use `--set cloudnative-pg.enabled=false`. + +--- + +### Error: Replicated SDK License Required + +**Full error:** +``` +Error: either license in the config file or integration license id must be specified +``` + +**Cause:** No Replicated license is configured. + +**Solution:** Set up a development license: + +```bash +# Quick setup +export REPLICATED_API_TOKEN=your-token +export REPLICATED_LICENSE_ID=your-license-id +``` + +**Detailed guide:** See [docs/DEVELOPMENT_LICENSE.md](docs/DEVELOPMENT_LICENSE.md) + +--- + +### Pods Stuck in Pending State + +**Symptoms:** +```bash +kubectl get pods -n flipt +# Shows pods in "Pending" state +``` + +**Common causes:** + +1. **No storage class available:** + ```bash + kubectl get storageclass + + # If empty, you need a storage class + # For local testing (minikube/kind): + kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml + ``` + +2. **Insufficient resources:** + ```bash + kubectl describe pod -n flipt + + # Look for: + # "0/3 nodes are available: 3 Insufficient cpu" + # "0/3 nodes are available: 3 Insufficient memory" + ``` + + **Solution:** Reduce resource requests in values.yaml or add more nodes. + +3. 
**PVC not binding:** + ```bash + kubectl get pvc -n flipt + + # If status is "Pending", check events: + kubectl describe pvc -n flipt + ``` + +--- + +### PostgreSQL Cluster Not Starting + +**Check cluster status:** +```bash +kubectl get cluster -n flipt + +# Should show status "Cluster in healthy state" +``` + +**If unhealthy, check pod logs:** +```bash +kubectl logs -l cnpg.io/cluster=flipt-cluster -n flipt + +# Common issues: +# - PVC not available +# - Insufficient permissions +# - Image pull errors +``` + +**Verify operator is running:** +```bash +kubectl get pods -n cnpg-system + +# Should show: +# cnpg-cloudnative-pg-xxx 1/1 Running +``` + +--- + +### Valkey Connection Issues + +**Check Valkey status:** +```bash +kubectl get pods -l app.kubernetes.io/name=valkey -n flipt + +# Should show master (and replica if configured) running +``` + +**Test Valkey connectivity from Flipt pod:** +```bash +kubectl exec -it deploy/flipt-flipt -n flipt -- sh + +# Inside pod: +nc -zv flipt-valkey 6379 +# Should show: Connection to flipt-valkey 6379 port [tcp/*] succeeded! 
+
+# Test with valkey-cli (if available):
+valkey-cli -h flipt-valkey -p 6379 ping
+# Should return: PONG
+```
+
+**Check Valkey password:**
+```bash
+kubectl get secret flipt-valkey -n flipt -o jsonpath='{.data.valkey-password}' | base64 -d
+```
+
+---
+
+### Flipt UI Not Accessible
+
+**Check service:**
+```bash
+kubectl get svc flipt-flipt -n flipt
+
+# Should show:
+# NAME          TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)
+# flipt-flipt   ClusterIP   10.96.xxx.xxx   8080/TCP,9000/TCP
+```
+
+**Test port-forward:**
+```bash
+kubectl port-forward -n flipt svc/flipt-flipt 8080:8080
+
+# Open browser to http://localhost:8080
+```
+
+**If ingress is enabled, check ingress:**
+```bash
+kubectl get ingress -n flipt
+kubectl describe ingress flipt-flipt -n flipt
+
+# Common issues:
+# - Ingress controller not installed
+# - DNS not pointing to ingress
+# - TLS certificate issues
+```
+
+---
+
+### Ingress Issues
+
+**No ingress controller:**
+```bash
+kubectl get ingressclass
+
+# If empty, install one:
+# For NGINX:
+helm upgrade --install ingress-nginx ingress-nginx \
+  --repo https://kubernetes.github.io/ingress-nginx \
+  --namespace ingress-nginx --create-namespace
+```
+
+**TLS certificate issues:**
+```bash
+# Check certificate secret
+kubectl get secret flipt-tls -n flipt
+
+# If using cert-manager, check certificate:
+kubectl get certificate -n flipt
+kubectl describe certificate flipt-tls -n flipt
+
+# Check cert-manager logs:
+kubectl logs -n cert-manager -l app=cert-manager
+```
+
+---
+
+## Alternative: Use External PostgreSQL
+
+If you prefer not to use the CloudNativePG operator, use an external PostgreSQL database:
+
+**values.yaml:**
+```yaml
+postgresql:
+  type: external
+
+  embedded:
+    enabled: false
+
+  external:
+    enabled: true
+    host: your-postgres-host.com
+    port: 5432
+    database: flipt
+    username: flipt
+    password: your-secure-password
+    sslMode: require
+```
+
+**Or use Bitnami PostgreSQL (simpler, single instance):**
+
+1. 
Modify Chart.yaml to use Bitnami PostgreSQL instead: + ```yaml + dependencies: + - name: postgresql + version: "12.x.x" + repository: https://charts.bitnami.com/bitnami + condition: postgresql.enabled + ``` + +2. Adjust values.yaml accordingly. + +--- + +## Alternative: Use External Valkey + +If you don't want embedded Valkey: + +**values.yaml:** +```yaml +valkey: + enabled: false + +flipt: + config: + cache: + enabled: false # Disable caching + # Or configure external Valkey: + # backend: valkey + # valkey: + # url: valkey://external-valkey:6379 +``` + +--- + +## Debugging Commands + +### View all resources +```bash +kubectl get all -n flipt +``` + +### Check events +```bash +kubectl get events -n flipt --sort-by='.lastTimestamp' +``` + +### View logs +```bash +# Flipt logs +kubectl logs -l app.kubernetes.io/name=flipt -n flipt --tail=100 -f + +# PostgreSQL logs +kubectl logs -l cnpg.io/cluster=flipt-cluster -n flipt --tail=100 -f + +# Valkey logs +kubectl logs -l app.kubernetes.io/name=valkey -n flipt --tail=100 -f +``` + +### Check configuration +```bash +# View rendered values +helm get values flipt -n flipt + +# View full manifest +helm get manifest flipt -n flipt + +# Test template rendering locally +helm template flipt ./chart --debug +``` + +### Resource usage +```bash +# Check pod resource usage +kubectl top pods -n flipt + +# Check node resource usage +kubectl top nodes +``` + +--- + +## Performance Issues + +### Slow flag evaluations + +**Enable Valkey caching:** +Ensure Valkey is enabled and Flipt is configured to use it: + +```yaml +valkey: + enabled: true + +flipt: + config: + cache: + enabled: true + backend: valkey + ttl: 5m +``` + +**Increase cache TTL:** +```yaml +flipt: + config: + cache: + ttl: 10m # Increase from 5m +``` + +**Scale Flipt horizontally:** +```yaml +flipt: + replicaCount: 3 # More replicas +``` + +### Database performance + +**Check connection pool settings:** +```yaml +flipt: + config: + db: + maxIdleConn: 25 + maxOpenConn: 100 + 
connMaxLifetime: 1h +``` + +**Scale PostgreSQL:** +```yaml +postgresql: + embedded: + cluster: + instances: 3 # HA cluster + resources: + limits: + cpu: 2000m + memory: 4Gi +``` + +--- + +## Uninstall Issues + +### Complete uninstall +```bash +# Uninstall Flipt +helm uninstall flipt -n flipt + +# Delete PVCs (data will be lost!) +kubectl delete pvc -l app.kubernetes.io/instance=flipt -n flipt + +# Delete namespace +kubectl delete namespace flipt + +# Optionally uninstall operator (if no other apps use it) +helm uninstall cnpg -n cnpg-system +kubectl delete namespace cnpg-system +``` + +### Stuck in terminating state +```bash +# Force delete namespace +kubectl delete namespace flipt --grace-period=0 --force + +# Remove finalizers if needed +kubectl patch namespace flipt -p '{"metadata":{"finalizers":[]}}' --type=merge +``` + +--- + +## Getting Help + +1. **Check logs:** Start with `kubectl logs` for the failing component +2. **Review events:** `kubectl get events -n flipt --sort-by='.lastTimestamp'` +3. **Generate support bundle:** `make support-bundle` +4. **Community:** + - Flipt Discord: https://discord.gg/kRhEqG2T + - Flipt GitHub Issues: https://github.com/flipt-io/flipt/issues + - Replicated Support: https://support.replicated.com + +--- + +## Useful Resources + +- [Flipt Documentation](https://docs.flipt.io) +- [CloudNativePG Documentation](https://cloudnative-pg.io/) +- [Kubernetes Debugging Guide](https://kubernetes.io/docs/tasks/debug/) +- [Helm Troubleshooting](https://helm.sh/docs/faq/troubleshooting/) diff --git a/applications/flipt/chart/Chart.yaml b/applications/flipt/chart/Chart.yaml new file mode 100644 index 00000000..73ebe87f --- /dev/null +++ b/applications/flipt/chart/Chart.yaml @@ -0,0 +1,43 @@ +apiVersion: v2 +name: flipt +description: | + Flipt is an open-source, self-hosted feature flag and experimentation platform. 
+ This enterprise-ready deployment includes PostgreSQL for durable storage and Valkey + for distributed caching across multiple instances. +type: application +version: 1.0.33 +appVersion: "1.61.0" +keywords: + - feature-flags + - feature-toggles + - experimentation + - a-b-testing + - flipt +home: https://flipt.io +sources: + - https://github.com/flipt-io/flipt + - https://github.com/flipt-io/helm-charts +maintainers: + - name: Replicated + url: https://replicated.com + +dependencies: + # CloudNativePG operator for PostgreSQL management + # Note: If you already have the operator installed cluster-wide, disable with: + # --set cloudnative-pg.enabled=false + - name: cloudnative-pg + version: "0.23.0" + repository: https://cloudnative-pg.github.io/charts + condition: cloudnative-pg.enabled + + # Valkey for distributed caching (Redis-compatible) + - name: valkey + version: "0.2.0" + repository: https://valkey.io/valkey-helm/ + condition: valkey.enabled + + # Replicated SDK for admin console integration + - name: replicated + version: "^1.12.0" + repository: oci://registry.replicated.com/library + condition: replicated.enabled diff --git a/applications/flipt/chart/templates/NOTES.txt b/applications/flipt/chart/templates/NOTES.txt new file mode 100644 index 00000000..8d0b4322 --- /dev/null +++ b/applications/flipt/chart/templates/NOTES.txt @@ -0,0 +1,23 @@ +You have successfully deployed Flipt. + +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "flipt.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "flipt.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "flipt.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.webPort }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "flipt.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT + echo "Visit http://127.0.0.1:8080 to use your application" +{{- end }} diff --git a/applications/flipt/chart/templates/_helpers.tpl b/applications/flipt/chart/templates/_helpers.tpl new file mode 100644 index 00000000..3a002ba2 --- /dev/null +++ b/applications/flipt/chart/templates/_helpers.tpl @@ -0,0 +1,183 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "flipt.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "flipt.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create a fully qualified name for the migration job +*/}} +{{- define "flipt.migration_name" -}} +{{- include "flipt.fullname" . | trunc 53 | trimSuffix "-" }}-migration +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "flipt.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "flipt.labels" -}} +helm.sh/chart: {{ include "flipt.chart" . }} +{{ include "flipt.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "flipt.selectorLabels" -}} +app.kubernetes.io/name: {{ include "flipt.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "flipt.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "flipt.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +PostgreSQL connection URL +*/}} +{{- define "flipt.postgresql.url" -}} +{{- if eq .Values.postgresql.type "embedded" }} +{{- printf "postgres://%s:%s@%s-cluster-rw.%s.svc.cluster.local:5432/%s?sslmode=require" .Values.postgresql.username .Values.postgresql.password .Release.Name .Release.Namespace .Values.postgresql.database }} +{{- else }} +{{- printf "postgres://%s:%s@%s:%d/%s?sslmode=%s" .Values.postgresql.external.username .Values.postgresql.external.password .Values.postgresql.external.host (int .Values.postgresql.external.port) .Values.postgresql.external.database .Values.postgresql.external.sslMode }} +{{- end }} +{{- end }} + +{{/* +Valkey connection URL (Redis-compatible) +*/}} +{{- define "flipt.valkey.url" -}} +{{- if .Values.valkey.enabled }} +{{- printf "redis://%s-valkey-svc.%s.svc.cluster.local:6379" .Release.Name .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +PostgreSQL cluster name for CloudnativePG +*/}} +{{- define "flipt.postgresql.clustername" -}} +{{- printf "%s-cluster" .Release.Name }} +{{- end }} + +{{/* +Database secret name +*/}} +{{- define "flipt.postgresql.secret" -}} +{{- if eq .Values.postgresql.type "embedded" }} +{{- printf "%s-cluster-app" .Release.Name }} +{{- else }} +{{- printf "%s-postgresql-external" (include "flipt.fullname" .) }} +{{- end }} +{{- end }} + +{{/* +Valkey secret name +*/}} +{{- define "flipt.valkey.secret" -}} +{{- printf "%s-valkey" .Release.Name }} +{{- end }} + +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Pod annotations +*/}} +{{- define "common.classes.podAnnotations" -}} + {{- if .Values.podAnnotations -}} + {{- tpl (toYaml .Values.podAnnotations) . | nindent 0 -}} + {{- end -}} + {{- printf "checksum/config: %v" (join "," .Values.flipt | sha256sum) | nindent 0 -}} +{{- end -}} + +{{/* +Deployment annotations +*/}} +{{- define "common.classes.deploymentAnnotations" -}} + {{- if .Values.deploymentAnnotations -}} + {{- tpl (toYaml .Values.deploymentAnnotations) . | nindent 0 -}} + {{- end -}} + {{- printf "checksum/config: %v" (join "," .Values.flipt | sha256sum) | nindent 0 -}} +{{- end -}} + +{{/* Return the target Kubernetes version */}} +{{- define "flipt.tools.kubeVersion" -}} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersionOverride }} +{{- end }} + +{{/* Return the appropriate apiVersion for autoscaling */}} +{{- define "flipt.apiVersion.autoscaling" -}} +{{- if (.Values.apiVersionOverrides).autoscaling -}} +{{- print .Values.apiVersionOverrides.autoscaling -}} +{{- else if semverCompare "<1.23-0" (include "flipt.tools.kubeVersion" .) 
-}} +{{- print "autoscaling/v2beta1" -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} diff --git a/applications/flipt/chart/templates/_preflight.tpl b/applications/flipt/chart/templates/_preflight.tpl new file mode 100644 index 00000000..956b7f7f --- /dev/null +++ b/applications/flipt/chart/templates/_preflight.tpl @@ -0,0 +1,159 @@ +{{- define "flipt.preflight" -}} +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: flipt-preflight-checks +spec: + analyzers: + # Kubernetes version check + - clusterVersion: + outcomes: + - fail: + when: "< 1.24.0" + message: Flipt requires Kubernetes 1.24.0 or later + uri: https://kubernetes.io/releases/ + - warn: + when: "< 1.27.0" + message: Kubernetes 1.27.0 or later is recommended for best performance + - pass: + when: ">= 1.27.0" + message: Kubernetes version is supported + + # Node resource checks + - nodeResources: + checkName: Minimum CPU cores available + outcomes: + - fail: + when: "sum(cpuCapacity) < 2" + message: The cluster must have at least 2 CPU cores available + - warn: + when: "sum(cpuCapacity) < 4" + message: At least 4 CPU cores are recommended for production + - pass: + message: Sufficient CPU resources available + + - nodeResources: + checkName: Minimum memory available + outcomes: + - fail: + when: "sum(memoryCapacity) < 2Gi" + message: The cluster must have at least 2GB of memory available + - warn: + when: "sum(memoryCapacity) < 4Gi" + message: At least 4GB of memory is recommended for production + - pass: + message: Sufficient memory available + + # Storage checks + - storageClass: + checkName: Default storage class + storageClassName: "" + outcomes: + - fail: + message: No default storage class found. A default storage class is required. 
+ uri: https://kubernetes.io/docs/concepts/storage/storage-classes/ + - pass: + message: Default storage class is available + + - storageClass: + checkName: Storage class with RWO support + storageClassName: "" + outcomes: + - fail: + message: The storage class does not support ReadWriteOnce access mode + - pass: + message: Storage class supports required access modes + + # Database specific checks (for embedded PostgreSQL) + - nodeResources: + checkName: PostgreSQL resource requirements + outcomes: + - fail: + when: "sum(memoryCapacity) < 2Gi" + message: | + At least 2GB of memory is required for embedded PostgreSQL. + Consider using an external database if cluster resources are limited. + - warn: + when: "sum(cpuCapacity) < 2" + message: At least 2 CPU cores recommended for embedded PostgreSQL + - pass: + message: Sufficient resources for embedded PostgreSQL + + # Redis specific checks + - nodeResources: + checkName: Redis resource requirements + outcomes: + - warn: + when: "sum(memoryCapacity) < 1Gi" + message: | + At least 1GB of memory recommended for Redis cache. + Consider using in-memory caching if Redis is disabled. + - pass: + message: Sufficient resources for Redis cache + + # CloudnativePG operator check (for embedded database) + - customResourceDefinition: + checkName: CloudnativePG Operator + customResourceDefinitionName: clusters.postgresql.cnpg.io + outcomes: + - warn: + message: | + CloudnativePG operator is not installed. + The operator will be installed as part of this deployment. + If you prefer to use an external PostgreSQL database, configure it in the admin console. + - pass: + message: CloudnativePG operator is available + + # Distribution-specific checks + - distribution: + outcomes: + - fail: + when: "== docker-desktop" + message: | + Docker Desktop is not recommended for production deployments. + Use a production-grade Kubernetes distribution. + - warn: + when: "== kind" + message: | + kind is detected. 
This is suitable for development only. + - warn: + when: "== minikube" + message: | + Minikube is detected. This is suitable for development only. + - pass: + message: Kubernetes distribution is suitable for Flipt deployment + + # Cluster resource capacity + - deploymentStatus: + checkName: Cluster is healthy + namespace: kube-system + name: coredns + outcomes: + - fail: + when: "absent" + message: CoreDNS is not running. The cluster may not be healthy. + - fail: + when: "!= Healthy" + message: CoreDNS deployment is not healthy + - pass: + message: Cluster DNS is healthy + + collectors: + - clusterInfo: {} + - clusterResources: {} + - logs: + selector: + - app=flipt + namespace: {{ .Values.namespace | default "flipt" | quote }} + limits: + maxAge: 720h + maxLines: 10000 + - exec: + name: kubectl-version + selector: + - app=flipt + namespace: {{ .Values.namespace | default "flipt" | quote }} + command: ["kubectl"] + args: ["version", "--short"] + timeout: 30s +{{- end -}} diff --git a/applications/flipt/chart/templates/_supportbundle.tpl b/applications/flipt/chart/templates/_supportbundle.tpl new file mode 100644 index 00000000..25b9e660 --- /dev/null +++ b/applications/flipt/chart/templates/_supportbundle.tpl @@ -0,0 +1,263 @@ +{{- define "flipt.supportbundle" -}} +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +metadata: + name: flipt-support-bundle +spec: + collectors: + # Cluster information + - clusterInfo: {} + - clusterResources: {} + + # Flipt application logs + - logs: + selector: + - app.kubernetes.io/name=flipt + namespace: "{{ .Release.Namespace }}" + limits: + maxAge: 720h + maxLines: 10000 + name: flipt/logs + + # PostgreSQL logs (embedded) + - logs: + selector: + - cnpg.io/cluster={{ .Release.Name }}-cluster + namespace: "{{ .Release.Namespace }}" + limits: + maxAge: 720h + maxLines: 10000 + name: postgresql/logs + + # CloudnativePG operator logs + - logs: + selector: + - app.kubernetes.io/name=cloudnative-pg + namespace: "{{ 
.Release.Namespace }}" + limits: + maxAge: 720h + maxLines: 10000 + name: cnpg-operator/logs + + # Valkey logs + - logs: + selector: + - app.kubernetes.io/name=valkey + namespace: "{{ .Release.Namespace }}" + limits: + maxAge: 720h + maxLines: 10000 + name: valkey/logs + + # Pod status and events + - pods: + namespace: "{{ .Release.Namespace }}" + selector: + - app.kubernetes.io/name=flipt + + - pods: + namespace: "{{ .Release.Namespace }}" + selector: + - cnpg.io/cluster={{ .Release.Name }}-cluster + + - pods: + namespace: "{{ .Release.Namespace }}" + selector: + - app.kubernetes.io/name=cloudnative-pg + + - pods: + namespace: "{{ .Release.Namespace }}" + selector: + - app.kubernetes.io/name=valkey + + # Service and endpoint information + - services: + namespace: "{{ .Release.Namespace }}" + + - endpoints: + namespace: "{{ .Release.Namespace }}" + + # ConfigMaps and Secrets (redacted) + - configMaps: + namespace: "{{ .Release.Namespace }}" + + - secrets: + namespace: "{{ .Release.Namespace }}" + includeKeys: + - false + + # PVC and storage information + - pvcs: + namespace: "{{ .Release.Namespace }}" + + # Ingress configuration + - ingress: + namespace: "{{ .Release.Namespace }}" + + # PostgreSQL specific diagnostics + - exec: + name: postgresql-version + selector: + - cnpg.io/cluster={{ .Release.Name }}-cluster + namespace: "{{ .Release.Namespace }}" + command: ["psql"] + args: ["-c", "SELECT version();"] + timeout: 30s + + - exec: + name: postgresql-connections + selector: + - cnpg.io/cluster={{ .Release.Name }}-cluster + namespace: "{{ .Release.Namespace }}" + command: ["psql"] + args: ["-c", "SELECT count(*) as connections FROM pg_stat_activity;"] + timeout: 30s + + - exec: + name: postgresql-database-size + selector: + - cnpg.io/cluster={{ .Release.Name }}-cluster + namespace: "{{ .Release.Namespace }}" + command: ["psql"] + args: ["-c", "SELECT pg_size_pretty(pg_database_size('flipt')) as database_size;"] + timeout: 30s + + # Valkey diagnostics + - exec: + 
name: valkey-info + selector: + - app.kubernetes.io/name=valkey + namespace: "{{ .Release.Namespace }}" + command: ["valkey-cli"] + args: ["INFO"] + timeout: 30s + + - exec: + name: valkey-memory + selector: + - app.kubernetes.io/name=valkey + namespace: "{{ .Release.Namespace }}" + command: ["valkey-cli"] + args: ["INFO", "memory"] + timeout: 30s + + # Flipt API health check + - http: + name: flipt-health + get: + url: http://{{ .Release.Name }}.{{ .Release.Namespace }}.svc.cluster.local:8080/health + timeout: 30s + + # Helm release information + - exec: + name: helm-values + selector: + - app.kubernetes.io/name=flipt + namespace: "{{ .Release.Namespace }}" + command: ["sh"] + args: ["-c", "helm get values {{ .Release.Name }} -n {{ .Release.Namespace }}"] + timeout: 30s + + - exec: + name: helm-manifest + selector: + - app.kubernetes.io/name=flipt + namespace: "{{ .Release.Namespace }}" + command: ["sh"] + args: ["-c", "helm get manifest {{ .Release.Name }} -n {{ .Release.Namespace }}"] + timeout: 30s + + # Node information + - nodeMetrics: {} + + # Storage class information + - storageClasses: {} + + # Network diagnostics + - exec: + name: flipt-to-postgres-connectivity + selector: + - app.kubernetes.io/name=flipt + namespace: "{{ .Release.Namespace }}" + command: ["sh"] + args: ["-c", "nc -zv {{ .Release.Name }}-cluster-rw 5432 || echo 'Cannot connect to PostgreSQL'"] + timeout: 10s + + - exec: + name: flipt-to-valkey-connectivity + selector: + - app.kubernetes.io/name=flipt + namespace: "{{ .Release.Namespace }}" + command: ["sh"] + args: ["-c", "nc -zv {{ .Release.Name }}-valkey-svc 6379 || echo 'Cannot connect to Valkey'"] + timeout: 10s + + analyzers: + # Pod status analysis + - deploymentStatus: + name: flipt + namespace: "{{ .Release.Namespace }}" + outcomes: + - fail: + when: "< 1" + message: Flipt deployment has no ready replicas + - warn: + when: "< {{ .Values.replicaCount | default 1 }}" + message: Flipt deployment has fewer replicas than configured + 
- pass: + message: Flipt deployment is healthy + + # PostgreSQL cluster health + - clusterPodStatuses: + name: postgresql-cluster-health + namespace: "{{ .Release.Namespace }}" + outcomes: + - fail: + when: "!= Healthy" + message: PostgreSQL cluster is not healthy + - pass: + message: PostgreSQL cluster is healthy + + # Valkey health + - deploymentStatus: + name: flipt-valkey + namespace: "{{ .Release.Namespace }}" + outcomes: + - fail: + when: "< 1" + message: Valkey deployment has no ready replicas + - pass: + message: Valkey is healthy + + # Node resources + - nodeResources: + checkName: Node CPU capacity + outcomes: + - warn: + when: "sum(cpuCapacity) < 2" + message: Less than 2 CPU cores available. Consider scaling cluster for production workloads. + - pass: + message: Sufficient CPU resources + + - nodeResources: + checkName: Node memory capacity + outcomes: + - warn: + when: "sum(memoryCapacity) < 4Gi" + message: Less than 4GB memory available. Consider scaling cluster for production workloads. + - pass: + message: Sufficient memory resources + + # HTTP health check analysis + - textAnalyze: + checkName: Flipt API health + fileName: flipt-health/result.json + regex: '"status": "SERVING"' + outcomes: + - fail: + when: "false" + message: Flipt API health check failed + - pass: + when: "true" + message: Flipt API is healthy +{{- end -}} diff --git a/applications/flipt/chart/templates/deployment.yaml b/applications/flipt/chart/templates/deployment.yaml new file mode 100644 index 00000000..c9765903 --- /dev/null +++ b/applications/flipt/chart/templates/deployment.yaml @@ -0,0 +1,144 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "flipt.fullname" . }} + namespace: {{ .Release.Namespace }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/flipt-config.yaml") . 
| sha256sum }} + {{- if .Values.deploymentAnnotations }} + {{- toYaml .Values.deploymentAnnotations | nindent 4 }} + {{- end }} + {{- if eq .Values.postgresql.type "embedded" }} + helm.sh/hook: post-install,post-upgrade + helm.sh/hook-weight: "10" + {{- end }} + labels: + {{- include "flipt.labels" . | nindent 4 }} + {{- if .Values.deploymentLabels }} + {{- toYaml .Values.deploymentLabels | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + minReadySeconds: {{ .Values.minReadySeconds }} + {{- with .Values.strategy }} + strategy: + {{- toYaml . | nindent 8 }} + {{- end }} + + selector: + matchLabels: + {{- include "flipt.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: {{- include "common.classes.podAnnotations" . | nindent 8 }} + labels: + {{- include "flipt.selectorLabels" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end}} + serviceAccountName: {{ include "flipt.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + {{- toYaml .Values.image.command | nindent 12 }} + ports: + - name: http + containerPort: {{ .Values.config.server.httpPort }} + protocol: TCP + - name: https + containerPort: {{ .Values.service.httpsPort }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpcPort }} + protocol: TCP + env: + - name: FLIPT_META_STATE_DIRECTORY + value: /home/flipt/.config/flipt + - name: FLIPT_META_CHECK_FOR_UPDATES + value: "0" + {{- if eq .Values.postgresql.type "embedded" }} + - name: FLIPT_DB_URL + valueFrom: + secretKeyRef: + name: {{ include "flipt.postgresql.clustername" . }}-app + key: uri + {{- end }} + {{- if and .Values.valkey.enabled .Values.valkey.auth.enable }} + - name: FLIPT_CACHE_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "flipt.valkey.secret" . 
}} + key: valkey-password + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: flipt-local-state + mountPath: /home/flipt/.config/flipt + - name: flipt-config + mountPath: /etc/flipt/config/default.yml + readOnly: true + subPath: default.yml + - name: flipt-data + mountPath: /var/opt/flipt + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + - name: flipt-local-state + emptyDir: {} + - name: flipt-config + configMap: + name: {{ include "flipt.fullname" . }} + - name: flipt-data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ default (include "flipt.fullname" .) .Values.persistence.existingClaim }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/applications/flipt/chart/templates/flipt-config.yaml b/applications/flipt/chart/templates/flipt-config.yaml new file mode 100644 index 00000000..8ac83d4b --- /dev/null +++ b/applications/flipt/chart/templates/flipt-config.yaml @@ -0,0 +1,63 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "flipt.fullname" . }} + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-1" + labels: + {{- include "flipt.labels" . | nindent 4 }} +data: + default.yml: | + log: + level: {{ .Values.config.log.level }} + encoding: {{ .Values.config.log.encoding }} + + server: + protocol: {{ .Values.config.server.protocol }} + host: {{ .Values.config.server.host }} + http_port: {{ .Values.config.server.httpPort }} + grpc_port: {{ .Values.config.server.grpcPort }} + + db: + # URL is set via FLIPT_DB_URL environment variable + max_idle_conn: {{ .Values.config.db.maxIdleConn }} + max_open_conn: {{ .Values.config.db.maxOpenConn }} + conn_max_lifetime: {{ .Values.config.db.connMaxLifetime }} + + {{- if .Values.valkey.enabled }} + cache: + enabled: {{ .Values.config.cache.enabled }} + backend: {{ .Values.config.cache.backend }} + ttl: {{ .Values.config.cache.ttl }} + redis: + host: {{ .Release.Name }}-valkey-svc.{{ .Release.Namespace }}.svc.cluster.local + port: 6379 + # password is set via FLIPT_CACHE_REDIS_PASSWORD environment variable + mode: {{ .Values.config.cache.redis.mode }} + prefix: {{ .Values.config.cache.redis.prefix | quote }} + {{- else }} + cache: + enabled: true + backend: memory + ttl: {{ .Values.config.cache.ttl }} + memory: + eviction_interval: 2m + {{- end }} + + {{- if .Values.config.cors.enabled }} + cors: + enabled: {{ .Values.config.cors.enabled }} + allowed_origins: + {{- range .Values.config.cors.allowedOrigins }} + - {{ . 
| quote }} + {{- end }} + {{- end }} + + {{- if .Values.config.authentication.methods.token.enabled }} + authentication: + methods: + token: + enabled: {{ .Values.config.authentication.methods.token.enabled }} + {{- end }} diff --git a/applications/flipt/chart/templates/hpa.yaml b/applications/flipt/chart/templates/hpa.yaml new file mode 100644 index 00000000..23c1b844 --- /dev/null +++ b/applications/flipt/chart/templates/hpa.yaml @@ -0,0 +1,41 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: {{ include "flipt.apiVersion.autoscaling" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "flipt.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "flipt.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "flipt.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if eq (include "flipt.apiVersion.autoscaling" $) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if eq (include "flipt.apiVersion.autoscaling" $) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} +{{- end }} diff --git a/applications/flipt/chart/templates/ingress.yaml b/applications/flipt/chart/templates/ingress.yaml new file mode 100644 index 00000000..1b598838 --- /dev/null +++ 
b/applications/flipt/chart/templates/ingress.yaml @@ -0,0 +1,65 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "flipt.fullname" . -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "flipt.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ default $fullName ((default dict .backend).serviceName) }} + port: + {{- if (default dict .backend).servicePort }} + name: {{ (default dict .backend).servicePort }} + {{- else }} + name: http + {{- end }} + {{- else }} + serviceName: {{ default $fullName ((default dict .backend).serviceName) }} + servicePort: {{ default "http" ((default dict .backend).servicePort) }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/applications/flipt/chart/templates/pdb.yaml b/applications/flipt/chart/templates/pdb.yaml new file mode 100644 index 00000000..4762f86e --- /dev/null +++ b/applications/flipt/chart/templates/pdb.yaml @@ -0,0 +1,14 @@ +{{- if (.Values.pdb).enabled }} +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ template "flipt.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "flipt.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ default "25%" .Values.pdb.maxUnavailable }} + selector: + matchLabels: + {{- include "flipt.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/applications/flipt/chart/templates/postgresql-cluster-job.yaml b/applications/flipt/chart/templates/postgresql-cluster-job.yaml new file mode 100644 index 00000000..6ee3b6f4 --- /dev/null +++ b/applications/flipt/chart/templates/postgresql-cluster-job.yaml @@ -0,0 +1,292 @@ +{{- if and .Values.postgresql.embedded.enabled (eq .Values.postgresql.type "embedded") }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "flipt.fullname" . 
}}-create-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "flipt.labels" . | nindent 4 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "flipt.fullname" . }}-create-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "flipt.labels" . | nindent 4 }} +rules: + - apiGroups: ["postgresql.cnpg.io"] + resources: ["clusters"] + verbs: ["get", "create", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "flipt.fullname" . }}-create-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "flipt.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "flipt.fullname" . }}-create-db +subjects: + - kind: ServiceAccount + name: {{ include "flipt.fullname" . }}-create-db + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "flipt.fullname" . }}-db-manifest + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "flipt.labels" . 
| nindent 4 }} +data: + cluster.yaml: | + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + metadata: + name: {{ include "flipt.postgresql.clustername" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "flipt.labels" . | nindent 8 }} + spec: + instances: {{ .Values.postgresql.embedded.cluster.instances }} + + imageName: {{ .Values.postgresql.embedded.cluster.imageName }} + + bootstrap: + initdb: + database: {{ .Values.postgresql.database }} + owner: {{ .Values.postgresql.username }} + + storage: + size: {{ .Values.postgresql.embedded.cluster.storage.size }} + {{- if .Values.postgresql.embedded.cluster.storage.storageClass }} + storageClass: {{ .Values.postgresql.embedded.cluster.storage.storageClass }} + {{- end }} + + resources: + {{- toYaml .Values.postgresql.embedded.cluster.resources | nindent 8 }} + + {{- if .Values.postgresql.embedded.cluster.backup.enabled }} + backup: + {{- toYaml .Values.postgresql.embedded.cluster.backup | nindent 8 }} + {{- end }} + + postgresql: + parameters: + max_connections: "200" + shared_buffers: "256MB" + effective_cache_size: "1GB" + maintenance_work_mem: "64MB" + checkpoint_completion_target: "0.9" + wal_buffers: "16MB" + default_statistics_target: "100" + random_page_cost: "1.1" + effective_io_concurrency: "200" + work_mem: "2621kB" + min_wal_size: "1GB" + max_wal_size: "4GB" + {{- if gt (int .Values.postgresql.embedded.cluster.instances) 1 }} + syncReplicaElectionConstraint: + enabled: true + nodeLabelsAntiAffinity: + - kubernetes.io/hostname + {{- end }} + + monitoring: + enablePodMonitor: {{ .Values.postgresql.embedded.cluster.monitoring.enabled | default false }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "flipt.fullname" . }}-create-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "flipt.labels" . 
| nindent 4 }} +spec: + backoffLimit: 10 + template: + metadata: + labels: + {{- include "flipt.selectorLabels" . | nindent 8 }} + spec: + serviceAccountName: {{ include "flipt.fullname" . }}-create-db + restartPolicy: OnFailure + containers: + - name: create-cluster + image: "{{ .Values.dbJob.image.registry }}/{{ .Values.dbJob.image.repository }}:{{ .Values.dbJob.image.tag }}" + imagePullPolicy: {{ .Values.dbJob.image.pullPolicy }} + command: + - /bin/bash + - -c + - | + set -e + + echo "Waiting for CloudNativePG operator webhook to be ready..." + for i in {1..60}; do + if kubectl get endpoints cnpg-webhook-service -n {{ .Release.Namespace }} &>/dev/null; then + ENDPOINTS=$(kubectl get endpoints cnpg-webhook-service -n {{ .Release.Namespace }} -o jsonpath='{.subsets[*].addresses[*].ip}') + if [ -n "$ENDPOINTS" ]; then + echo "CloudNativePG webhook is ready!" + break + fi + fi + echo "Waiting for webhook endpoints... (attempt $i/60)" + sleep 5 + done + + echo "Creating PostgreSQL Cluster..." + if kubectl get cluster {{ include "flipt.postgresql.clustername" . }} -n {{ .Release.Namespace }} &>/dev/null; then + echo "Cluster already exists, patching..." + kubectl apply -f /manifests/cluster.yaml + else + echo "Creating new cluster..." + kubectl create -f /manifests/cluster.yaml + fi + + echo "Waiting for PostgreSQL Cluster to be ready..." + for i in {1..60}; do + READY=$(kubectl get cluster {{ include "flipt.postgresql.clustername" . }} -n {{ .Release.Namespace }} -o jsonpath='{.status.phase}' 2>/dev/null || echo "") + if [ "$READY" = "Cluster in healthy state" ]; then + echo "PostgreSQL Cluster is ready!" + break + fi + echo "Waiting for cluster to be ready... (attempt $i/60, status: $READY)" + sleep 10 + done + + # Verify the cluster is truly ready + kubectl wait --for=condition=Ready pod -l cnpg.io/cluster={{ include "flipt.postgresql.clustername" . }} -n {{ .Release.Namespace }} --timeout=600s + + echo "PostgreSQL Cluster created and ready!" 
+ volumeMounts: + - name: manifests + mountPath: /manifests + volumes: + - name: manifests + configMap: + name: {{ include "flipt.fullname" . }}-db-manifest +--- +# Pre-delete hook to clean up the CNPG PostgreSQL Cluster CR +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "flipt.fullname" . }}-delete-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "flipt.labels" . | nindent 4 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "flipt.fullname" . }}-delete-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "flipt.labels" . | nindent 4 }} +rules: + - apiGroups: ["postgresql.cnpg.io"] + resources: ["clusters"] + verbs: ["get", "delete"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "flipt.fullname" . }}-delete-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "flipt.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "flipt.fullname" . }}-delete-db +subjects: + - kind: ServiceAccount + name: {{ include "flipt.fullname" . }}-delete-db + namespace: {{ .Release.Namespace }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "flipt.fullname" . 
}}-delete-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "flipt.labels" . | nindent 4 }} +spec: + backoffLimit: 3 + template: + metadata: + labels: + {{- include "flipt.selectorLabels" . | nindent 8 }} + spec: + serviceAccountName: {{ include "flipt.fullname" . }}-delete-db + restartPolicy: OnFailure + containers: + - name: delete-cluster + image: "{{ .Values.dbJob.image.registry }}/{{ .Values.dbJob.image.repository }}:{{ .Values.dbJob.image.tag }}" + imagePullPolicy: {{ .Values.dbJob.image.pullPolicy }} + command: + - /bin/bash + - -c + - | + set -e + + CLUSTER_NAME="{{ include "flipt.postgresql.clustername" . }}" + NAMESPACE="{{ .Release.Namespace }}" + + if kubectl get cluster "$CLUSTER_NAME" -n "$NAMESPACE" &>/dev/null; then + echo "Deleting PostgreSQL Cluster $CLUSTER_NAME..." + kubectl delete cluster "$CLUSTER_NAME" -n "$NAMESPACE" --wait=true --timeout=120s + echo "PostgreSQL Cluster deleted." + else + echo "PostgreSQL Cluster $CLUSTER_NAME not found, skipping." + fi +{{- end }} diff --git a/applications/flipt/chart/templates/postgresql-external-secret.yaml b/applications/flipt/chart/templates/postgresql-external-secret.yaml new file mode 100644 index 00000000..a9b99dec --- /dev/null +++ b/applications/flipt/chart/templates/postgresql-external-secret.yaml @@ -0,0 +1,19 @@ +{{- if and (eq .Values.postgresql.type "external") .Values.postgresql.external.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "flipt.fullname" . }}-postgresql-external + namespace: {{ .Release.Namespace }} + labels: + {{- include "flipt.labels" . 
| nindent 4 }} +type: Opaque +stringData: + host: {{ .Values.postgresql.external.host | quote }} + port: {{ .Values.postgresql.external.port | quote }} + database: {{ .Values.postgresql.external.database | quote }} + username: {{ .Values.postgresql.external.username | quote }} + password: {{ .Values.postgresql.external.password | quote }} + sslMode: {{ .Values.postgresql.external.sslMode | quote }} + url: {{ include "flipt.postgresql.url" . | quote }} +{{- end }} diff --git a/applications/flipt/chart/templates/secret-preflights.yaml b/applications/flipt/chart/templates/secret-preflights.yaml new file mode 100644 index 00000000..02fa4ba5 --- /dev/null +++ b/applications/flipt/chart/templates/secret-preflights.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-preflights + labels: + troubleshoot.sh/kind: preflight +type: Opaque +stringData: + preflight.yaml: | +{{ include "flipt.preflight" . | indent 4 }} diff --git a/applications/flipt/chart/templates/secret-supportbundle.yaml b/applications/flipt/chart/templates/secret-supportbundle.yaml new file mode 100644 index 00000000..529232e5 --- /dev/null +++ b/applications/flipt/chart/templates/secret-supportbundle.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-supportbundle + labels: + troubleshoot.sh/kind: support-bundle +type: Opaque +stringData: + support-bundle-spec: | +{{ include "flipt.supportbundle" . | indent 4 }} diff --git a/applications/flipt/chart/templates/service.yaml b/applications/flipt/chart/templates/service.yaml new file mode 100644 index 00000000..912190a1 --- /dev/null +++ b/applications/flipt/chart/templates/service.yaml @@ -0,0 +1,54 @@ +{{- if or .Values.service.enabled (not (hasKey .Values.service "enabled")) }} +{{- $root := . }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "flipt.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "flipt.labels" . 
| nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- with .Values.service.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- with .Values.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + {{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . }} + {{- end }} + {{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- else }} + type: {{ .Values.service.type }} + {{- end }} + {{- with .Values.service.externalIPs }} + externalIPs: + {{- toYaml . | nindent 4 }} + {{- end }} + ports: + - port: {{ .Values.service.httpPort }} + targetPort: http + protocol: TCP + name: http + - port: {{ .Values.service.httpsPort }} + targetPort: https + protocol: TCP + name: https + - port: {{ .Values.service.grpcPort }} + targetPort: grpc + protocol: TCP + name: grpc + selector: + {{- include "flipt.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/applications/flipt/chart/templates/serviceaccount.yaml b/applications/flipt/chart/templates/serviceaccount.yaml new file mode 100644 index 00000000..5ee04ff7 --- /dev/null +++ b/applications/flipt/chart/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "flipt.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "flipt.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/applications/flipt/chart/templates/valkey-service.yaml b/applications/flipt/chart/templates/valkey-service.yaml new file mode 100644 index 00000000..435cd460 --- /dev/null +++ b/applications/flipt/chart/templates/valkey-service.yaml @@ -0,0 +1,21 @@ +{{- if .Values.valkey.enabled }} +# Fix for broken Valkey chart service (targets wrong port name "http" instead of "valkey") +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-valkey-svc + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: valkey + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + - port: 6379 + targetPort: 6379 + protocol: TCP + name: valkey + selector: + app.kubernetes.io/name: valkey + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/flipt/chart/values.yaml b/applications/flipt/chart/values.yaml new file mode 100644 index 00000000..5dab938e --- /dev/null +++ b/applications/flipt/chart/values.yaml @@ -0,0 +1,290 @@ +## Flipt Feature Flag Platform Configuration +## Standalone chart with PostgreSQL and Valkey support + +global: + ## Global image registry override + ## Useful for air-gap deployments or custom registries + imageRegistry: "" + +## CloudNativePG Operator Configuration +## Set to false if operator is already installed cluster-wide +cloudnative-pg: + enabled: true + +## Flipt Application Configuration + +replicaCount: 2 +minReadySeconds: 0 + +image: + registry: ghcr.io + repository: flipt-io/flipt + pullPolicy: IfNotPresent + tag: "v1.61.0" # Note: Flipt versions require 'v' prefix + command: ["/flipt"] + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + create: true + annotations: {} + name: "" + +podAnnotations: {} +podLabels: {} + +deploymentAnnotations: {} +deploymentLabels: {} + +podSecurityContext: + runAsUser: 100 + runAsGroup: 1000 + fsGroup: 1000 + runAsNonRoot: true + +securityContext: + 
allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 100 + seccompProfile: + type: "RuntimeDefault" + +## Flipt Configuration +## See https://docs.flipt.io/configuration/overview for all options +config: + ## Logging configuration + log: + level: info + encoding: json + + ## Server configuration + server: + protocol: http + host: 0.0.0.0 + httpPort: 8080 + grpcPort: 9000 + + ## Database configuration (templated from PostgreSQL values) + db: + url: "" # Will be set via template + maxIdleConn: 10 + maxOpenConn: 50 + connMaxLifetime: 1h + + ## Cache configuration (templated from Valkey values) + cache: + enabled: true + backend: redis # Valkey is Redis-compatible, use 'redis' backend + ttl: 5m + redis: + url: "" # Will be set via template + mode: single # or 'cluster' for cluster mode + prefix: "flipt" + + ## CORS configuration for web UI + cors: + enabled: true + allowedOrigins: + - "*" + + ## Authentication (optional, configure for production) + authentication: + methods: + token: + enabled: true + +## Health check probes +readinessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 3 + +livenessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 3 + +## Resource limits for Flipt pods +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +## Service configuration +service: + enabled: true + type: ClusterIP + httpPort: 8080 + httpsPort: 443 + grpcPort: 9000 + annotations: {} + labels: {} + +## Ingress configuration +ingress: + enabled: false + className: "" + annotations: {} + # cert-manager.io/cluster-issuer: letsencrypt-prod + # nginx.ingress.kubernetes.io/ssl-redirect: "true" + hosts: + - host: flipt.local + paths: + - path: / + pathType: ImplementationSpecific + backend: + servicePort: http + serviceName: "" + tls: [] + # - secretName: flipt-tls + # hosts: + # - flipt.local + +## Horizontal Pod 
Autoscaler +autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + +## Pod Disruption Budget (recommended for HA) +podDisruptionBudget: + enabled: true + minAvailable: 1 + +## ServiceMonitor for Prometheus metrics +serviceMonitor: + enabled: false + interval: 30s + scrapeTimeout: 10s + labels: {} + +## Persistence (optional) +persistence: + enabled: false + storageClass: "" + accessMode: ReadWriteOnce + size: 8Gi + +## Extra environment variables +extraEnvVars: [] + +## Extra volumes +extraVolumes: [] + +## Extra volume mounts +extraVolumeMounts: [] + +## Node selector +nodeSelector: {} + +## Tolerations +tolerations: [] + +## Affinity rules +affinity: {} + +## Database Job Configuration (for PostgreSQL cluster creation) +dbJob: + image: + registry: docker.io + repository: alpine/k8s + tag: "1.33.7" + pullPolicy: IfNotPresent + +## PostgreSQL Database Configuration +postgresql: + ## Use embedded PostgreSQL (CloudnativePG) or external database + type: embedded # 'embedded' or 'external' + + ## Embedded PostgreSQL using CloudnativePG operator + embedded: + enabled: true + ## Database cluster configuration + cluster: + instances: 1 # 3 recommended for production HA + storage: + size: 10Gi + storageClass: "" # Uses default storage class + ## PostgreSQL version + imageName: ghcr.io/cloudnative-pg/postgresql:16 + ## Resource limits + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 100m + memory: 256Mi + ## Enable continuous backup (optional) + backup: + enabled: false + ## Enable monitoring (optional) + monitoring: + enabled: false + + ## External PostgreSQL connection details + external: + enabled: false + host: postgresql.example.com + port: 5432 + database: flipt + username: flipt + password: "" # Set via secret or KOTS config + sslMode: require + + ## Database initialization + database: flipt + username: flipt + password: "" # Auto-generated if empty (for 
embedded) + +## Valkey Cache Configuration (Redis-compatible) +valkey: + enabled: true + + ## Valkey image configuration + image: + repository: ghcr.io/valkey-io/valkey + tag: "8.0" + pullPolicy: IfNotPresent + + ## Authentication (ACL-based) + ## Note: Flipt connects without auth since Valkey chart doesn't create secrets + auth: + enable: false + + ## Persistence (emptyDir - cache data is ephemeral) + ## Note: Official Valkey chart v0.2.0 has a bug with PVC creation + ## For persistent storage, create PVC manually and set storage.persistentVolumeClaimName + storage: + requestedSize: null + className: null + + ## Resources + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +## Replicated SDK Integration +replicated: + enabled: true + integration: + enabled: true + licenseID: "" diff --git a/applications/flipt/docs/DEVELOPMENT_LICENSE.md b/applications/flipt/docs/DEVELOPMENT_LICENSE.md new file mode 100644 index 00000000..2421256e --- /dev/null +++ b/applications/flipt/docs/DEVELOPMENT_LICENSE.md @@ -0,0 +1,231 @@ +# Development License Guide + +This guide explains how to obtain and configure a Replicated development license for local testing of Flipt. + +## Why a License is Required + +Flipt integrates with Replicated's SDK to provide: +- Admin console integration +- Preflight checks +- Support bundle generation +- License enforcement +- Automated updates + +The Replicated SDK requires a valid license to function, even in development environments. + +## Quick Start + +### Option 1: Automated Setup (Recommended) + +```bash +# 1. Set up your Replicated API token +export REPLICATED_API_TOKEN=your-token-here + +# 2. Run the setup script +./scripts/setup-dev-license.sh + +# 3. Load the license +source .replicated/license.env + +# 4. 
Install Flipt +./scripts/install.sh +``` + +### Option 2: Using Makefile + +```bash +# Set up license and install in one command +export REPLICATED_API_TOKEN=your-token-here +make install-with-license +``` + +## Prerequisites + +### 1. Replicated CLI + +Install the Replicated CLI: + +```bash +# macOS +brew install replicatedhq/replicated/cli + +# Linux/macOS (alternative) +curl -s https://api.github.com/repos/replicatedhq/replicated/releases/latest | \ + grep "browser_download_url.*$(uname -s)_$(uname -m)" | \ + cut -d '"' -f 4 | \ + xargs curl -L -o replicated +chmod +x replicated +sudo mv replicated /usr/local/bin/ +``` + +Verify installation: +```bash +replicated version +``` + +### 2. Replicated API Token + +1. Log in to [vendor.replicated.com](https://vendor.replicated.com) +2. Navigate to **Settings** > **Service Accounts** +3. Click **Create Service Account** +4. Copy the API token +5. Export it: + ```bash + export REPLICATED_API_TOKEN=your-token-here + ``` + + Or add to your shell profile (~/.bashrc, ~/.zshrc): + ```bash + echo 'export REPLICATED_API_TOKEN=your-token-here' >> ~/.zshrc + source ~/.zshrc + ``` + +## Manual License Setup + +If you prefer manual setup or need more control: + +### Step 1: Create a Development Customer + +```bash +replicated customer create \ + --app flipt \ + --name "dev-$(whoami)-$(date +%s)" \ + --channel Unstable \ + --license-type dev \ + --output json > customer.json +``` + +### Step 2: Extract License ID + +```bash +LICENSE_ID=$(jq -r '.id' customer.json) +echo "License ID: $LICENSE_ID" +``` + +### Step 3: Save License Configuration + +```bash +mkdir -p .replicated +echo "REPLICATED_LICENSE_ID=$LICENSE_ID" > .replicated/license.env +``` + +### Step 4: Use License + +```bash +source .replicated/license.env +./scripts/install.sh +``` + +## License Management + +### View Your Licenses + +```bash +replicated customer ls +``` + +### Delete a License + +```bash +replicated customer rm --customer "customer-name" + +# Or delete 
all dev licenses +replicated customer ls --output json | \ + jq -r '.[] | select(.licenseType == "dev") | .name' | \ + xargs -I {} replicated customer rm --customer {} +``` + +### License Expiry + +Development licenses may have expiration dates. If your license expires: + +1. Delete the old license: + ```bash + make clean-license + ``` + +2. Create a new one: + ```bash + ./scripts/setup-dev-license.sh + ``` + +## Troubleshooting + +### Error: "replicated: command not found" + +Install the Replicated CLI (see Prerequisites above). + +### Error: "unauthorized: authentication required" + +Your API token may be invalid or expired: +1. Verify token: `replicated api version` +2. Generate new token at vendor.replicated.com +3. Export new token: `export REPLICATED_API_TOKEN=new-token` + +### Error: "license not found" + +The license secret may not be created: +```bash +# Verify secret exists +kubectl get secret replicated-license -n flipt + +# Recreate if missing +kubectl create secret generic replicated-license \ + --from-literal=license="$REPLICATED_LICENSE_ID" \ + --namespace flipt +``` + +### Pod Still Crashing + +Check Replicated SDK logs: +```bash +kubectl logs -l app=replicated -n flipt +``` + +Common issues: +- License ID is incorrect +- License has expired +- Network issues accessing Replicated services + +## CI/CD Integration + +For automated testing in CI/CD: + +```yaml +# Example GitHub Actions +- name: Setup Replicated License + env: + REPLICATED_API_TOKEN: ${{ secrets.REPLICATED_API_TOKEN }} + run: | + ./scripts/setup-dev-license.sh + source .replicated/license.env + +- name: Install Flipt + run: | + source .replicated/license.env + ./scripts/install.sh + +- name: Cleanup License + if: always() + run: | + make clean-license +``` + +## Alternative: Disable Replicated SDK + +If you absolutely need to run without a license (not recommended for production testing): + +```bash +helm install flipt ./chart \ + --namespace flipt \ + --create-namespace \ + --set 
replicated.enabled=false +``` + +**Note:** This disables all Replicated features including support bundles and preflight checks. + +## Resources + +- [Replicated CLI Documentation](https://docs.replicated.com/reference/replicated-cli) +- [Customer Management](https://docs.replicated.com/vendor/customers-managing) +- [License Types](https://docs.replicated.com/vendor/licenses-about) diff --git a/applications/flipt/examples/README.md b/applications/flipt/examples/README.md new file mode 100644 index 00000000..0d9869a0 --- /dev/null +++ b/applications/flipt/examples/README.md @@ -0,0 +1,342 @@ +# Flipt Examples + +This directory contains examples for testing and integrating with Flipt. + +For Helm installation and Kubernetes deployment instructions, see the [top-level README](../README.md). + +## Directory Structure + +``` +examples/ +├── kubernetes/ # Helm values examples +│ ├── values-minimal.yaml +│ ├── values-production.yaml +│ └── values-external-db.yaml +└── sdk/ # SDK integration examples + ├── nodejs-example.js + ├── golang-example.go + └── python-example.py +``` + +## Smoke Tests + +Smoke tests are located in [`tests/smoke/`](../tests/smoke/). See the test script there for usage instructions. 
+ +## SDK Integration Examples + +### Node.js + +The Node.js example demonstrates: +- Simple boolean flag evaluation +- Variant flags for A/B testing +- Batch flag evaluation +- Express middleware integration +- Local caching with TTL + +**Run the example:** + +```bash +cd sdk +npm install @flipt-io/flipt +export FLIPT_URL=http://localhost:8080 +node nodejs-example.js +``` + +**Key features:** +- ✅ Boolean flags +- ✅ Variant flags (A/B testing) +- ✅ Batch evaluation +- ✅ Express middleware +- ✅ Caching layer +- ✅ Error handling + +### Go + +The Go example demonstrates: +- gRPC client integration +- Boolean and variant flag evaluation +- HTTP middleware +- Cached client with TTL +- Production-ready patterns + +**Run the example:** + +```bash +cd sdk +go mod init example +go get go.flipt.io/flipt/rpc/flipt +export FLIPT_ADDR=localhost:9000 +go run golang-example.go +``` + +**Key features:** +- ✅ gRPC client +- ✅ Boolean flags +- ✅ Variant flags +- ✅ HTTP middleware +- ✅ Client-side caching +- ✅ Context propagation + +### Python + +The Python example demonstrates: +- HTTP REST API client +- Flask middleware integration +- Django middleware integration +- FastAPI dependency injection +- Caching with TTL + +**Run the example:** + +```bash +cd sdk +pip install requests flask # or django, or fastapi +export FLIPT_URL=http://localhost:8080 +python python-example.py +``` + +**Key features:** +- ✅ REST API client +- ✅ Flask integration +- ✅ Django integration +- ✅ FastAPI integration +- ✅ Client-side caching +- ✅ Error handling + +## Common Use Cases + +### 1. Feature Rollout + +Gradually enable a feature for increasing percentages of users: + +```javascript +// Week 1: 10% rollout +// Week 2: 25% rollout +// Week 3: 50% rollout +// Week 4: 100% rollout + +const enabled = await flipt.evaluateBoolean({ + flagKey: 'new_feature', + entityId: userId, + context: { /* user attributes */ } +}); +``` + +### 2. 
User Targeting + +Enable features for specific user segments: + +```javascript +const enabled = await flipt.evaluateBoolean({ + flagKey: 'premium_feature', + entityId: userId, + context: { + plan: 'enterprise', + email: user.email + } +}); +``` + +### 3. A/B Testing + +Run experiments with multiple variants: + +```javascript +const variant = await flipt.evaluateVariant({ + flagKey: 'checkout_experiment', + entityId: userId, + context: { /* user attributes */ } +}); + +switch (variant.variantKey) { + case 'control': + // Original experience + break; + case 'variant_a': + // Variant A experience + break; + case 'variant_b': + // Variant B experience + break; +} +``` + +### 4. Kill Switch + +Instantly disable a problematic feature: + +```javascript +// Simply toggle the flag off in Flipt UI +// All evaluations will immediately return false +const enabled = await flipt.evaluateBoolean({ + flagKey: 'problematic_feature', + entityId: userId +}); +``` + +### 5. Environment-Specific Config + +Different behavior per environment: + +```javascript +// In Flipt, set different values per environment: +// - dev: feature_enabled = true +// - staging: feature_enabled = true +// - production: feature_enabled = false + +const enabled = await flipt.evaluateBoolean({ + namespaceKey: process.env.ENVIRONMENT, + flagKey: 'experimental_feature', + entityId: userId +}); +``` + +## Best Practices + +### 1. Always Provide Default Values + +```javascript +const enabled = client.evaluateBoolean(/* ... */) || false; +``` + +### 2. Use Caching for High-Traffic Endpoints + +```javascript +const cachedClient = new CachedFliptClient(client, 60000); // 1 min TTL +``` + +### 3. Handle Errors Gracefully + +```javascript +try { + const enabled = await client.evaluateBoolean(/* ... */); +} catch (error) { + // Log error and return safe default + console.error('Flag evaluation failed:', error); + return false; +} +``` + +### 4. 
Use Meaningful Entity IDs + +```javascript +// Good: Consistent user identifier +entityId: user.id + +// Bad: Random or changing identifiers +entityId: Math.random() +``` + +### 5. Provide Rich Context + +```javascript +context: { + email: user.email, + plan: user.subscription.plan, + region: user.region, + accountAge: calculateAge(user.createdAt), + // Add any attribute you might want to target on +} +``` + +### 6. Monitor Flag Evaluations + +```javascript +const result = await client.evaluateBoolean(/* ... */); + +// Log or send metrics +metrics.increment('flipt.evaluation', { + flag: 'feature_name', + result: result.enabled +}); +``` + +## Testing Feature Flags + +### Unit Tests + +Mock the Flipt client in your tests: + +```javascript +// Jest example +jest.mock('@flipt-io/flipt'); + +test('shows new dashboard when flag enabled', () => { + FliptClient.prototype.evaluateBoolean.mockResolvedValue({ + enabled: true + }); + + // Test code that uses the flag +}); +``` + +### Integration Tests + +Use a test Flipt instance: + +```javascript +const testClient = new FliptClient({ + url: 'http://flipt-test:8080' +}); +``` + +### Local Development + +Override flags for development: + +```javascript +const FEATURE_OVERRIDES = { + 'new_feature': process.env.NODE_ENV === 'development' +}; + +const enabled = FEATURE_OVERRIDES[flagKey] ?? + await client.evaluateBoolean(/* ... 
*/); +``` + +## Troubleshooting + +### Connection Issues + +```bash +# Check Flipt is accessible +curl http://flipt.flipt.svc.cluster.local:8080/health + +# Port forward for local testing +kubectl port-forward -n flipt svc/flipt-flipt 8080:8080 +``` + +### Cache Issues + +```javascript +// Clear cache if flags aren't updating +cachedClient.cache.clear(); +``` + +### Debug Logging + +```javascript +// Enable debug logging +const client = new FliptClient({ + url: 'http://flipt:8080', + debug: true +}); +``` + +## Additional Resources + +- [Flipt Documentation](https://docs.flipt.io) +- [SDK Reference](https://docs.flipt.io/integration) +- [API Documentation](https://docs.flipt.io/reference/overview) +- [Best Practices](https://docs.flipt.io/guides/best-practices) + +## Contributing + +Have a useful example? Please contribute! + +1. Add your example to the appropriate directory +2. Update this README +3. Submit a pull request + +## License + +Examples are provided under the same license as the parent repository. 
diff --git a/applications/flipt/examples/kubernetes/values-external-db.yaml b/applications/flipt/examples/kubernetes/values-external-db.yaml new file mode 100644 index 00000000..03485418 --- /dev/null +++ b/applications/flipt/examples/kubernetes/values-external-db.yaml @@ -0,0 +1,36 @@ +# Values for using external PostgreSQL database +# Use this when you have an existing PostgreSQL instance + +flipt: + enabled: true + replicaCount: 2 + + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +postgresql: + type: external + + embedded: + enabled: false + + external: + enabled: true + host: postgresql.database.svc.cluster.local + port: 5432 + database: flipt + username: flipt + password: "your-secure-password-here" # Use secret in production + sslMode: require + +redis: + enabled: true + architecture: standalone + +replicated: + enabled: true diff --git a/applications/flipt/examples/kubernetes/values-minimal.yaml b/applications/flipt/examples/kubernetes/values-minimal.yaml new file mode 100644 index 00000000..eedc7849 --- /dev/null +++ b/applications/flipt/examples/kubernetes/values-minimal.yaml @@ -0,0 +1,37 @@ +# Minimal values.yaml for development/testing +# Single replica, embedded database and Redis + +flipt: + enabled: true + replicaCount: 1 + + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + + ingress: + enabled: false # Use port-forward for local access + +postgresql: + type: embedded + embedded: + enabled: true + cluster: + instances: 1 + storage: + size: 5Gi + +redis: + enabled: true + architecture: standalone + master: + persistence: + enabled: true + size: 2Gi + +replicated: + enabled: true diff --git a/applications/flipt/examples/kubernetes/values-production.yaml b/applications/flipt/examples/kubernetes/values-production.yaml new file mode 100644 index 00000000..55ca4e78 --- /dev/null +++ b/applications/flipt/examples/kubernetes/values-production.yaml @@ -0,0 +1,129 @@ +# 
Production values.yaml +# High availability with multiple replicas, Redis caching, and monitoring + +flipt: + enabled: true + replicaCount: 3 + + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 200m + memory: 256Mi + + ingress: + enabled: true + className: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + hosts: + - host: flipt.example.com + paths: + - path: / + pathType: Prefix + tls: + - secretName: flipt-tls + hosts: + - flipt.example.com + + autoscaling: + enabled: true + minReplicas: 3 + maxReplicas: 10 + targetCPUUtilizationPercentage: 70 + targetMemoryUtilizationPercentage: 80 + + podDisruptionBudget: + enabled: true + minAvailable: 2 + + serviceMonitor: + enabled: true + interval: 30s + + config: + log: + level: info + encoding: json + +postgresql: + type: embedded + embedded: + enabled: true + cluster: + instances: 3 # HA configuration + storage: + size: 50Gi + storageClass: fast-ssd + resources: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 500m + memory: 1Gi + backup: + enabled: true + # Configure backup to S3/MinIO + monitoring: + enabled: true + +redis: + enabled: true + architecture: replication + + auth: + enabled: true + # password will be auto-generated + + master: + persistence: + enabled: true + size: 20Gi + storageClass: fast-ssd + resources: + limits: + cpu: 1000m + memory: 2Gi + requests: + cpu: 200m + memory: 512Mi + + replica: + replicaCount: 2 + persistence: + enabled: true + size: 20Gi + resources: + limits: + cpu: 1000m + memory: 2Gi + requests: + cpu: 200m + memory: 512Mi + + metrics: + enabled: true + serviceMonitor: + enabled: true + +replicated: + enabled: true + +# Anti-affinity to spread pods across nodes +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: 
app.kubernetes.io/name + operator: In + values: + - flipt + topologyKey: kubernetes.io/hostname diff --git a/applications/flipt/examples/sdk/golang-example.go b/applications/flipt/examples/sdk/golang-example.go new file mode 100644 index 00000000..162fd587 --- /dev/null +++ b/applications/flipt/examples/sdk/golang-example.go @@ -0,0 +1,330 @@ +/** + * Flipt Go SDK Example + * + * This example demonstrates how to integrate Flipt feature flags + * into a Go application. + * + * Install: go get go.flipt.io/flipt/rpc/flipt + */ + +package main + +import ( + "context" + "fmt" + "log" + "net/http" + "os" + "time" + + flipt "go.flipt.io/flipt/rpc/flipt" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +// FliptClient wraps the Flipt gRPC client +type FliptClient struct { + client flipt.FliptClient + conn *grpc.ClientConn +} + +// NewFliptClient creates a new Flipt client +func NewFliptClient(address string) (*FliptClient, error) { + // Connect to Flipt gRPC server + conn, err := grpc.Dial( + address, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithBlock(), + grpc.WithTimeout(5*time.Second), + ) + if err != nil { + return nil, fmt.Errorf("failed to connect to Flipt: %w", err) + } + + client := flipt.NewFliptClient(conn) + return &FliptClient{ + client: client, + conn: conn, + }, nil +} + +// Close closes the gRPC connection +func (c *FliptClient) Close() error { + return c.conn.Close() +} + +// Example 1: Simple boolean flag evaluation +func (c *FliptClient) CheckBooleanFlag(ctx context.Context, userID string) (bool, error) { + resp, err := c.client.EvaluateBoolean(ctx, &flipt.EvaluationRequest{ + NamespaceKey: "default", + FlagKey: "new_dashboard", + EntityId: userID, + Context: map[string]string{ + "email": "user@example.com", + "plan": "enterprise", + "region": "us-east-1", + }, + }) + if err != nil { + return false, fmt.Errorf("failed to evaluate flag: %w", err) + } + + if resp.Enabled { + fmt.Println("✓ New 
dashboard is enabled for this user") + } else { + fmt.Println("✗ New dashboard is disabled for this user") + } + + return resp.Enabled, nil +} + +// Example 2: Variant flag evaluation (A/B testing) +func (c *FliptClient) CheckVariantFlag(ctx context.Context, userID string) (string, error) { + resp, err := c.client.EvaluateVariant(ctx, &flipt.EvaluationRequest{ + NamespaceKey: "default", + FlagKey: "checkout_flow", + EntityId: userID, + Context: map[string]string{ + "userId": userID, + "email": "test@example.com", + "accountAge": "30", + }, + }) + if err != nil { + return "control", fmt.Errorf("failed to evaluate variant: %w", err) + } + + fmt.Printf("User assigned to variant: %s\n", resp.VariantKey) + + switch resp.VariantKey { + case "control": + return "original_checkout", nil + case "variant_a": + return "streamlined_checkout", nil + case "variant_b": + return "express_checkout", nil + default: + return "original_checkout", nil + } +} + +// Example 3: Batch evaluation +func (c *FliptClient) EvaluateMultipleFlags(ctx context.Context, userID string, context map[string]string) (map[string]bool, error) { + flags := []string{"new_dashboard", "dark_mode", "beta_features"} + results := make(map[string]bool) + + for _, flagKey := range flags { + resp, err := c.client.EvaluateBoolean(ctx, &flipt.EvaluationRequest{ + NamespaceKey: "default", + FlagKey: flagKey, + EntityId: userID, + Context: context, + }) + if err != nil { + log.Printf("Error evaluating flag %s: %v", flagKey, err) + results[flagKey] = false + continue + } + results[flagKey] = resp.Enabled + } + + fmt.Printf("Feature flags for user: %+v\n", results) + return results, nil +} + +// Example 4: HTTP middleware for feature flags +type FeatureFlagMiddleware struct { + client *FliptClient +} + +func NewFeatureFlagMiddleware(client *FliptClient) *FeatureFlagMiddleware { + return &FeatureFlagMiddleware{client: client} +} + +func (m *FeatureFlagMiddleware) Handler(next http.Handler) http.Handler { + return 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ // Extract user info from request (simplified)
+ userID := r.Header.Get("X-User-ID")
+ if userID == "" {
+ userID = "anonymous"
+ }
+
+ evalCtx := map[string]string{
+ "email": r.Header.Get("X-User-Email"),
+ "plan": r.Header.Get("X-User-Plan"),
+ "ip": r.RemoteAddr,
+ "userAgent": r.UserAgent(),
+ }
+
+ // Evaluate flags
+ features, err := m.client.EvaluateMultipleFlags(ctx, userID, evalCtx)
+ if err != nil {
+ log.Printf("Error loading feature flags: %v", err)
+ features = make(map[string]bool) // Empty on error
+ }
+
+ // Add features to context
+ type contextKey string
+ const featuresKey contextKey = "features"
+ ctx = context.WithValue(ctx, featuresKey, features)
+
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+
+// Example 5: Cached client with TTL
+type CachedFliptClient struct {
+ client *FliptClient
+ cache map[string]*cacheEntry
+ ttl time.Duration
+}
+
+type cacheEntry struct {
+ value bool
+ timestamp time.Time
+}
+
+func NewCachedFliptClient(client *FliptClient, ttl time.Duration) *CachedFliptClient {
+ return &CachedFliptClient{
+ client: client,
+ cache: make(map[string]*cacheEntry),
+ ttl: ttl,
+ }
+}
+
+func (c *CachedFliptClient) EvaluateBoolean(ctx context.Context, namespaceKey, flagKey, entityID string, context map[string]string) (bool, error) {
+ cacheKey := fmt.Sprintf("%s:%s:%s", namespaceKey, flagKey, entityID)
+
+ // Check cache
+ if entry, ok := c.cache[cacheKey]; ok {
+ if time.Since(entry.timestamp) < c.ttl {
+ return entry.value, nil
+ }
+ }
+
+ // Fetch fresh value
+ resp, err := c.client.client.EvaluateBoolean(ctx, &flipt.EvaluationRequest{
+ NamespaceKey: namespaceKey,
+ FlagKey: flagKey,
+ EntityId: entityID,
+ Context: context,
+ })
+ if err != nil {
+ // Return stale cache on error if available
+ if entry, ok := c.cache[cacheKey]; ok {
+ log.Printf("Using stale cache due to error: %v", err)
+ return entry.value, nil
+ }
+ return false, err
+ } + + // Update cache + c.cache[cacheKey] = &cacheEntry{ + value: resp.Enabled, + timestamp: time.Now(), + } + + return resp.Enabled, nil +} + +// Example HTTP handlers +func dashboardHandler(w http.ResponseWriter, r *http.Request) { + type contextKey string + const featuresKey contextKey = "features" + + features, ok := r.Context().Value(featuresKey).(map[string]bool) + if !ok { + features = make(map[string]bool) + } + + if features["new_dashboard"] { + fmt.Fprintf(w, "Showing new dashboard v2") + } else { + fmt.Fprintf(w, "Showing old dashboard v1") + } +} + +func configHandler(w http.ResponseWriter, r *http.Request) { + type contextKey string + const featuresKey contextKey = "features" + + features, ok := r.Context().Value(featuresKey).(map[string]bool) + if !ok { + features = make(map[string]bool) + } + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `{"features": %+v, "version": "1.0.0"}`, features) +} + +func main() { + // Get Flipt address from environment or use default + fliptAddr := os.Getenv("FLIPT_ADDR") + if fliptAddr == "" { + fliptAddr = "flipt.flipt.svc.cluster.local:9000" + } + + // Create Flipt client + client, err := NewFliptClient(fliptAddr) + if err != nil { + log.Fatalf("Failed to create Flipt client: %v", err) + } + defer client.Close() + + ctx := context.Background() + + fmt.Println("Flipt Go SDK Examples\n") + + // Example 1: Boolean flag + fmt.Println("Example 1: Boolean Flag") + _, err = client.CheckBooleanFlag(ctx, "user-123") + if err != nil { + log.Printf("Error: %v", err) + } + fmt.Println() + + // Example 2: Variant flag + fmt.Println("Example 2: Variant Flag") + variant, err := client.CheckVariantFlag(ctx, "user-456") + if err != nil { + log.Printf("Error: %v", err) + } else { + fmt.Printf("Selected checkout: %s\n", variant) + } + fmt.Println() + + // Example 3: Batch evaluation + fmt.Println("Example 3: Batch Evaluation") + _, err = client.EvaluateMultipleFlags(ctx, "user-789", map[string]string{ + "email": 
"user789@example.com", + "plan": "premium", + }) + if err != nil { + log.Printf("Error: %v", err) + } + fmt.Println() + + // Example 4: Cached evaluation + fmt.Println("Example 4: Cached Evaluation") + cachedClient := NewCachedFliptClient(client, 1*time.Minute) + result1, _ := cachedClient.EvaluateBoolean(ctx, "default", "new_dashboard", "user-123", nil) + fmt.Printf("First call (from API): %v\n", result1) + result2, _ := cachedClient.EvaluateBoolean(ctx, "default", "new_dashboard", "user-123", nil) + fmt.Printf("Second call (from cache): %v\n", result2) + fmt.Println() + + // Example 5: HTTP server with middleware + fmt.Println("Example 5: Starting HTTP server with feature flag middleware") + middleware := NewFeatureFlagMiddleware(client) + + mux := http.NewServeMux() + mux.HandleFunc("/dashboard", dashboardHandler) + mux.HandleFunc("/api/config", configHandler) + + handler := middleware.Handler(mux) + + fmt.Println("Server listening on :8080") + log.Fatal(http.ListenAndServe(":8080", handler)) +} diff --git a/applications/flipt/examples/sdk/nodejs-example.js b/applications/flipt/examples/sdk/nodejs-example.js new file mode 100644 index 00000000..2fea9956 --- /dev/null +++ b/applications/flipt/examples/sdk/nodejs-example.js @@ -0,0 +1,241 @@ +/** + * Flipt Node.js SDK Example + * + * This example demonstrates how to integrate Flipt feature flags + * into a Node.js application. 
+ * + * Install: npm install @flipt-io/flipt + */ + +const { FliptClient } = require('@flipt-io/flipt'); + +// Initialize Flipt client +const flipt = new FliptClient({ + url: process.env.FLIPT_URL || 'http://flipt.flipt.svc.cluster.local:8080', + // Optional: authentication + // authentication: { + // clientToken: process.env.FLIPT_CLIENT_TOKEN + // } +}); + +// Example 1: Simple boolean flag evaluation +async function checkBooleanFlag() { + try { + const result = await flipt.evaluateBoolean({ + namespaceKey: 'default', + flagKey: 'new_dashboard', + entityId: 'user-123', + context: { + email: 'user@example.com', + plan: 'enterprise', + region: 'us-east-1' + } + }); + + if (result.enabled) { + console.log('✓ New dashboard is enabled for this user'); + return true; + } else { + console.log('✗ New dashboard is disabled for this user'); + return false; + } + } catch (error) { + console.error('Error evaluating flag:', error); + // Default to false on error (fail-safe) + return false; + } +} + +// Example 2: Variant flag evaluation (A/B testing) +async function checkVariantFlag() { + try { + const result = await flipt.evaluateVariant({ + namespaceKey: 'default', + flagKey: 'checkout_flow', + entityId: 'user-456', + context: { + userId: 'user-456', + email: 'test@example.com', + accountAge: '30' + } + }); + + console.log(`User assigned to variant: ${result.variantKey}`); + + switch (result.variantKey) { + case 'control': + return 'original_checkout'; + case 'variant_a': + return 'streamlined_checkout'; + case 'variant_b': + return 'express_checkout'; + default: + return 'original_checkout'; + } + } catch (error) { + console.error('Error evaluating variant:', error); + return 'original_checkout'; // Default variant + } +} + +// Example 3: Batch evaluation (multiple flags at once) +async function evaluateMultipleFlags(userId, context) { + try { + const flags = ['new_dashboard', 'dark_mode', 'beta_features']; + const results = {}; + + for (const flagKey of flags) { + const 
result = await flipt.evaluateBoolean({ + namespaceKey: 'default', + flagKey, + entityId: userId, + context + }); + results[flagKey] = result.enabled; + } + + console.log('Feature flags for user:', results); + return results; + } catch (error) { + console.error('Error evaluating flags:', error); + return {}; + } +} + +// Example 4: Using flags in Express middleware +function createFeatureFlagMiddleware(flipt) { + return async (req, res, next) => { + const userId = req.user?.id || 'anonymous'; + const context = { + email: req.user?.email || '', + plan: req.user?.plan || 'free', + ip: req.ip, + userAgent: req.get('user-agent') + }; + + try { + // Evaluate all relevant flags for this request + req.features = await evaluateMultipleFlags(userId, context); + next(); + } catch (error) { + console.error('Error loading feature flags:', error); + req.features = {}; // Empty features on error + next(); + } + }; +} + +// Example 5: Graceful degradation with caching +class FliptCache { + constructor(flipt, ttlMs = 60000) { // 1 minute default TTL + this.flipt = flipt; + this.cache = new Map(); + this.ttlMs = ttlMs; + } + + getCacheKey(namespaceKey, flagKey, entityId) { + return `${namespaceKey}:${flagKey}:${entityId}`; + } + + async evaluateBoolean(namespaceKey, flagKey, entityId, context) { + const cacheKey = this.getCacheKey(namespaceKey, flagKey, entityId); + const cached = this.cache.get(cacheKey); + + // Check if cache is valid + if (cached && Date.now() - cached.timestamp < this.ttlMs) { + return cached.value; + } + + // Fetch fresh value + try { + const result = await this.flipt.evaluateBoolean({ + namespaceKey, + flagKey, + entityId, + context + }); + + // Update cache + this.cache.set(cacheKey, { + value: result, + timestamp: Date.now() + }); + + return result; + } catch (error) { + // Return stale cache on error if available + if (cached) { + console.warn('Using stale cache due to error:', error); + return cached.value; + } + throw error; + } + } +} + +// Example usage 
in an Express app +const express = require('express'); +const app = express(); + +// Add feature flag middleware +app.use(createFeatureFlagMiddleware(flipt)); + +app.get('/dashboard', async (req, res) => { + if (req.features.new_dashboard) { + res.render('dashboard-v2'); + } else { + res.render('dashboard-v1'); + } +}); + +app.get('/api/config', async (req, res) => { + res.json({ + features: req.features, + version: '1.0.0' + }); +}); + +// Main execution +async function main() { + console.log('Flipt Node.js SDK Examples\n'); + + // Example 1: Boolean flag + console.log('Example 1: Boolean Flag'); + await checkBooleanFlag(); + console.log(''); + + // Example 2: Variant flag + console.log('Example 2: Variant Flag'); + const variant = await checkVariantFlag(); + console.log(`Selected checkout: ${variant}\n`); + + // Example 3: Batch evaluation + console.log('Example 3: Batch Evaluation'); + await evaluateMultipleFlags('user-789', { + email: 'user789@example.com', + plan: 'premium' + }); + console.log(''); + + // Example 4: Cached evaluation + console.log('Example 4: Cached Evaluation'); + const cachedClient = new FliptCache(flipt); + const result1 = await cachedClient.evaluateBoolean('default', 'new_dashboard', 'user-123', {}); + console.log('First call (from API):', result1.enabled); + const result2 = await cachedClient.evaluateBoolean('default', 'new_dashboard', 'user-123', {}); + console.log('Second call (from cache):', result2.enabled); +} + +// Run examples if executed directly +if (require.main === module) { + main().catch(console.error); +} + +module.exports = { + flipt, + checkBooleanFlag, + checkVariantFlag, + evaluateMultipleFlags, + createFeatureFlagMiddleware, + FliptCache +}; diff --git a/applications/flipt/examples/sdk/python-example.py b/applications/flipt/examples/sdk/python-example.py new file mode 100644 index 00000000..39d4186f --- /dev/null +++ b/applications/flipt/examples/sdk/python-example.py @@ -0,0 +1,367 @@ +""" +Flipt Python SDK Example + 
+This example demonstrates how to integrate Flipt feature flags +into a Python application. + +Install: pip install flipt +""" + +import os +from typing import Dict, Optional +from datetime import datetime, timedelta +import requests + + +class FliptClient: + """Simple Flipt HTTP client for Python""" + + def __init__(self, url: str = None, auth_token: str = None): + self.url = url or os.getenv("FLIPT_URL", "http://flipt.flipt.svc.cluster.local:8080") + self.auth_token = auth_token + self.session = requests.Session() + + if self.auth_token: + self.session.headers.update({"Authorization": f"Bearer {self.auth_token}"}) + + def evaluate_boolean( + self, + namespace_key: str, + flag_key: str, + entity_id: str, + context: Optional[Dict[str, str]] = None, + ) -> bool: + """Evaluate a boolean feature flag""" + url = f"{self.url}/api/v1/evaluate/v1/boolean" + + payload = { + "namespaceKey": namespace_key, + "flagKey": flag_key, + "entityId": entity_id, + "context": context or {}, + } + + try: + response = self.session.post(url, json=payload) + response.raise_for_status() + data = response.json() + return data.get("enabled", False) + except requests.exceptions.RequestException as e: + print(f"Error evaluating flag: {e}") + return False # Default to false on error + + def evaluate_variant( + self, + namespace_key: str, + flag_key: str, + entity_id: str, + context: Optional[Dict[str, str]] = None, + ) -> Optional[str]: + """Evaluate a variant feature flag""" + url = f"{self.url}/api/v1/evaluate/v1/variant" + + payload = { + "namespaceKey": namespace_key, + "flagKey": flag_key, + "entityId": entity_id, + "context": context or {}, + } + + try: + response = self.session.post(url, json=payload) + response.raise_for_status() + data = response.json() + return data.get("variantKey") + except requests.exceptions.RequestException as e: + print(f"Error evaluating variant: {e}") + return None + + +# Example 1: Simple boolean flag evaluation +def check_boolean_flag(client: FliptClient, 
user_id: str = "user-123") -> bool: + """Check a boolean feature flag""" + result = client.evaluate_boolean( + namespace_key="default", + flag_key="new_dashboard", + entity_id=user_id, + context={ + "email": "user@example.com", + "plan": "enterprise", + "region": "us-east-1", + }, + ) + + if result: + print("✓ New dashboard is enabled for this user") + else: + print("✗ New dashboard is disabled for this user") + + return result + + +# Example 2: Variant flag evaluation (A/B testing) +def check_variant_flag(client: FliptClient, user_id: str = "user-456") -> str: + """Check a variant feature flag""" + variant = client.evaluate_variant( + namespace_key="default", + flag_key="checkout_flow", + entity_id=user_id, + context={ + "userId": user_id, + "email": "test@example.com", + "accountAge": "30", + }, + ) + + print(f"User assigned to variant: {variant}") + + variants = { + "control": "original_checkout", + "variant_a": "streamlined_checkout", + "variant_b": "express_checkout", + } + + return variants.get(variant, "original_checkout") + + +# Example 3: Batch evaluation +def evaluate_multiple_flags( + client: FliptClient, user_id: str, context: Dict[str, str] +) -> Dict[str, bool]: + """Evaluate multiple feature flags at once""" + flags = ["new_dashboard", "dark_mode", "beta_features"] + results = {} + + for flag_key in flags: + try: + result = client.evaluate_boolean( + namespace_key="default", + flag_key=flag_key, + entity_id=user_id, + context=context, + ) + results[flag_key] = result + except Exception as e: + print(f"Error evaluating flag {flag_key}: {e}") + results[flag_key] = False + + print(f"Feature flags for user: {results}") + return results + + +# Example 4: Flask middleware +try: + from flask import Flask, request, g + from functools import wraps + + def feature_flags_middleware(client: FliptClient): + """Flask middleware to add feature flags to request context""" + + def decorator(f): + @wraps(f) + def decorated_function(*args, **kwargs): + user_id = 
request.headers.get("X-User-ID", "anonymous") + context = { + "email": request.headers.get("X-User-Email", ""), + "plan": request.headers.get("X-User-Plan", "free"), + "ip": request.remote_addr, + "userAgent": request.headers.get("User-Agent", ""), + } + + try: + g.features = evaluate_multiple_flags(client, user_id, context) + except Exception as e: + print(f"Error loading feature flags: {e}") + g.features = {} + + return f(*args, **kwargs) + + return decorated_function + + return decorator + + # Example Flask app + def create_app(client: FliptClient) -> Flask: + app = Flask(__name__) + + @app.route("/dashboard") + @feature_flags_middleware(client) + def dashboard(): + if g.features.get("new_dashboard", False): + return "Showing new dashboard v2" + else: + return "Showing old dashboard v1" + + @app.route("/api/config") + @feature_flags_middleware(client) + def config(): + return {"features": g.features, "version": "1.0.0"} + + return app + +except ImportError: + print("Flask not installed, skipping Flask examples") + create_app = None + + +# Example 5: Cached client with TTL +class CachedFliptClient: + """Flipt client with local caching""" + + def __init__(self, client: FliptClient, ttl_seconds: int = 60): + self.client = client + self.cache = {} + self.ttl = timedelta(seconds=ttl_seconds) + + def _get_cache_key(self, namespace_key: str, flag_key: str, entity_id: str) -> str: + return f"{namespace_key}:{flag_key}:{entity_id}" + + def evaluate_boolean( + self, + namespace_key: str, + flag_key: str, + entity_id: str, + context: Optional[Dict[str, str]] = None, + ) -> bool: + """Evaluate flag with caching""" + cache_key = self._get_cache_key(namespace_key, flag_key, entity_id) + cached = self.cache.get(cache_key) + + # Check if cache is valid + if cached: + value, timestamp = cached + if datetime.now() - timestamp < self.ttl: + return value + + # Fetch fresh value + try: + result = self.client.evaluate_boolean( + namespace_key, flag_key, entity_id, context + ) + + # 
Update cache + self.cache[cache_key] = (result, datetime.now()) + + return result + except Exception as e: + # Return stale cache on error if available + if cached: + print(f"Using stale cache due to error: {e}") + return cached[0] + raise + + +# Example 6: Django middleware +try: + from django.utils.deprecation import MiddlewareMixin + + class FeatureFlagMiddleware(MiddlewareMixin): + """Django middleware to add feature flags to request""" + + def __init__(self, get_response): + self.get_response = get_response + self.client = FliptClient() + + def process_request(self, request): + user_id = getattr(request.user, "id", "anonymous") + context = { + "email": getattr(request.user, "email", ""), + "plan": getattr(request.user, "plan", "free"), + "ip": request.META.get("REMOTE_ADDR", ""), + "userAgent": request.META.get("HTTP_USER_AGENT", ""), + } + + try: + request.features = evaluate_multiple_flags( + self.client, str(user_id), context + ) + except Exception as e: + print(f"Error loading feature flags: {e}") + request.features = {} + +except ImportError: + print("Django not installed, skipping Django examples") + + +# Example 7: FastAPI dependency +try: + from fastapi import FastAPI, Depends, Request + from fastapi.responses import JSONResponse + + def get_feature_flags(request: Request): + """FastAPI dependency for feature flags""" + client = FliptClient() + user_id = request.headers.get("X-User-ID", "anonymous") + context = { + "email": request.headers.get("X-User-Email", ""), + "plan": request.headers.get("X-User-Plan", "free"), + "ip": request.client.host, + "userAgent": request.headers.get("User-Agent", ""), + } + + try: + return evaluate_multiple_flags(client, user_id, context) + except Exception as e: + print(f"Error loading feature flags: {e}") + return {} + + # Example FastAPI app + def create_fastapi_app() -> FastAPI: + app = FastAPI() + + @app.get("/dashboard") + def dashboard(features: dict = Depends(get_feature_flags)): + if features.get("new_dashboard", 
False): + return {"message": "Showing new dashboard v2"} + else: + return {"message": "Showing old dashboard v1"} + + @app.get("/api/config") + def config(features: dict = Depends(get_feature_flags)): + return {"features": features, "version": "1.0.0"} + + return app + +except ImportError: + print("FastAPI not installed, skipping FastAPI examples") + create_fastapi_app = None + + +def main(): + """Run all examples""" + print("Flipt Python SDK Examples\n") + + # Create client + client = FliptClient() + + # Example 1: Boolean flag + print("Example 1: Boolean Flag") + check_boolean_flag(client) + print() + + # Example 2: Variant flag + print("Example 2: Variant Flag") + variant = check_variant_flag(client) + print(f"Selected checkout: {variant}\n") + + # Example 3: Batch evaluation + print("Example 3: Batch Evaluation") + evaluate_multiple_flags( + client, "user-789", {"email": "user789@example.com", "plan": "premium"} + ) + print() + + # Example 4: Cached evaluation + print("Example 4: Cached Evaluation") + cached_client = CachedFliptClient(client, ttl_seconds=60) + result1 = cached_client.evaluate_boolean("default", "new_dashboard", "user-123") + print(f"First call (from API): {result1}") + result2 = cached_client.evaluate_boolean("default", "new_dashboard", "user-123") + print(f"Second call (from cache): {result2}") + print() + + print("Examples completed!") + + +if __name__ == "__main__": + main() diff --git a/applications/flipt/replicated/ec-cluster.yaml b/applications/flipt/replicated/ec-cluster.yaml new file mode 100644 index 00000000..1fe7ba7e --- /dev/null +++ b/applications/flipt/replicated/ec-cluster.yaml @@ -0,0 +1,60 @@ +apiVersion: embeddedcluster.replicated.com/v1beta1 +kind: Config +spec: + version: "2.13.3+k8s-1.33" + extensions: + helm: + repositories: + - name: ingress-nginx + url: https://kubernetes.github.io/ingress-nginx + - name: jetstack + url: https://charts.jetstack.io + charts: + - name: cert-manager + chartname: jetstack/cert-manager + 
namespace: cert-manager + version: "v1.17.1" + values: | + crds: + enabled: true + image: + digest: "" + webhook: + image: + digest: "" + cainjector: + image: + digest: "" + startupapicheck: + image: + digest: "" + - name: ingress-nginx + chartname: ingress-nginx/ingress-nginx + namespace: ingress-nginx + version: "4.14.1" + values: | + controller: + service: + type: NodePort + nodePorts: + http: "80" + https: "443" + # Known issue: Only use image tags for multi-architecture images. + # Set digest to empty string to ensure the air gap builder uses + # single-architecture images. + image: + digest: "" + digestChroot: "" + admissionWebhooks: + patch: + image: + digest: "" + unsupportedOverrides: + k0s: |- + config: + spec: + workerProfiles: + - name: default + values: + allowedUnsafeSysctls: + - net.ipv4.ip_forward \ No newline at end of file diff --git a/applications/flipt/replicated/k8s-app.yaml b/applications/flipt/replicated/k8s-app.yaml new file mode 100644 index 00000000..73d2b794 --- /dev/null +++ b/applications/flipt/replicated/k8s-app.yaml @@ -0,0 +1,36 @@ +--- +# Kubernetes SIG Application Custom Resource +# Adds links/buttons to the Admin Console dashboard +# See: https://docs.replicated.com/vendor/admin-console-adding-buttons-links +apiVersion: app.k8s.io/v1beta1 +kind: Application +metadata: + name: flipt + labels: + app.kubernetes.io/name: flipt +spec: + descriptor: + type: "Feature Flag Platform" + version: "v1.61.0" + description: "Flipt is an open-source, self-hosted feature flag platform" + keywords: + - feature-flags + - feature-toggles + - experimentation + - ab-testing + links: + # Open App button - uses ingress hostname if configured + - description: Open Flipt + url: 'repl{{ if ConfigOptionEquals "ingress_enabled" "1" }}https://repl{{ ConfigOption "ingress_hostname" }}repl{{ else }}http://localhost:8080repl{{ end }}' + # Flipt Documentation + - description: Documentation + url: "https://docs.flipt.io" + # Flipt API Docs (built-in) + - 
description: API Reference + url: 'repl{{ if ConfigOptionEquals "ingress_enabled" "1" }}https://repl{{ ConfigOption "ingress_hostname" }}/docs/repl{{ else }}http://localhost:8080/docs/repl{{ end }}' + maintainers: + - name: Flipt + url: https://flipt.io + selector: + matchLabels: + app.kubernetes.io/name: flipt diff --git a/applications/flipt/replicated/kots-app.yaml b/applications/flipt/replicated/kots-app.yaml new file mode 100644 index 00000000..634366fd --- /dev/null +++ b/applications/flipt/replicated/kots-app.yaml @@ -0,0 +1,52 @@ +apiVersion: kots.io/v1beta1 +kind: Application +metadata: + name: flipt +spec: + title: Flipt Feature Flags + icon: https://raw.githubusercontent.com/flipt-io/flipt/main/ui/public/logo.svg + statusInformers: + - deployment/flipt + - deployment/flipt-cloudnative-pg + - deployment/flipt-valkey + ports: + - serviceName: flipt + servicePort: 8080 + localPort: 8080 + applicationUrl: "http://flipt.{{repl Namespace}}.svc.cluster.local:8080" + graphs: + - title: Flipt Pods + query: 'count(kube_pod_status_phase{namespace="{{repl Namespace}}", pod=~"flipt-.*", phase="Running"})' + legend: Running Pods + queries: + - query: 'count(kube_pod_status_phase{namespace="{{repl Namespace}}", pod=~"flipt-.*", phase="Running"})' + legend: Running + - query: 'count(kube_pod_status_phase{namespace="{{repl Namespace}}", pod=~"flipt-.*", phase="Pending"})' + legend: Pending + - title: Database Status + query: 'count(kube_pod_status_phase{namespace="{{repl Namespace}}", pod=~"{{repl ConfigOption "release_name"}}-cluster-.*", phase="Running"})' + legend: PostgreSQL Pods + when: 'repl{{ ConfigOptionEquals "postgres_type" "embedded" }}' + - title: Valkey Status + query: 'count(kube_pod_status_phase{namespace="{{repl Namespace}}", pod=~".*-valkey-.*", phase="Running"})' + legend: Valkey Pods + when: 'repl{{ ConfigOptionEquals "valkey_enabled" "1" }}' + releaseNotes: | + ## Flipt v1.61.0 + + This release includes: + - Latest Flipt v1.61.0 with enhanced 
performance + - PostgreSQL 16 support via CloudNativePG + - Valkey distributed caching for multi-instance deployments + - Horizontal pod autoscaling support + - Comprehensive monitoring and metrics + + For more details, visit: https://github.com/flipt-io/flipt/releases + additionalImages: + - ghcr.io/flipt-io/flipt:v1.61.0 + - ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0 + - ghcr.io/cloudnative-pg/postgresql:16 + - docker.io/alpine/k8s:1.33.7 + - ghcr.io/valkey-io/valkey:8.0 + - proxy.replicated.com/library/replicated-sdk-image:1.15.0 + allowRollback: true diff --git a/applications/flipt/replicated/kots-cluster-issuer.yaml b/applications/flipt/replicated/kots-cluster-issuer.yaml new file mode 100644 index 00000000..6c395f7e --- /dev/null +++ b/applications/flipt/replicated/kots-cluster-issuer.yaml @@ -0,0 +1,16 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod + annotations: + kots.io/when: '{{repl and (ConfigOptionEquals "ingress_enabled" "1") (ConfigOptionEquals "tls_enabled" "1") }}' +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + email: 'repl{{ ConfigOption "tls_acme_email" }}' + privateKeySecretRef: + name: letsencrypt-prod-account-key + solvers: + - http01: + ingress: + ingressClassName: nginx diff --git a/applications/flipt/replicated/kots-config.yaml b/applications/flipt/replicated/kots-config.yaml new file mode 100644 index 00000000..4553f8ff --- /dev/null +++ b/applications/flipt/replicated/kots-config.yaml @@ -0,0 +1,298 @@ +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: flipt-config +spec: + groups: + - name: basic + title: Basic Configuration + description: Essential configuration for Flipt deployment + items: + - name: release_name + title: Release Name + type: text + default: flipt + required: true + help_text: Name for this Flipt deployment (used as Helm release name) + + - name: replica_count + title: Number of Flipt Replicas + type: text + default: "2" + required: 
true + help_text: Number of Flipt pod replicas (2+ recommended for HA with Valkey cache) + + - name: ingress + title: Ingress Configuration + description: Configure how Flipt is accessed from outside the cluster + items: + - name: ingress_enabled + title: Enable Ingress + type: bool + default: "1" + help_text: Enable ingress for external access to Flipt + + - name: ingress_hostname + title: Hostname + type: text + default: flipt.example.com + required: false + when: 'repl{{ ConfigOptionEquals "ingress_enabled" "1" }}' + help_text: Hostname for accessing Flipt UI + + - name: ingress_class + title: Ingress Class + type: select_one + default: nginx + items: + - name: nginx + title: NGINX + - name: traefik + title: Traefik + - name: custom + title: Custom + when: 'repl{{ ConfigOptionEquals "ingress_enabled" "1" }}' + help_text: Ingress controller class to use + + - name: ingress_class_custom + title: Custom Ingress Class + type: text + default: "" + when: '{{repl and (ConfigOptionEquals "ingress_enabled" "1") (ConfigOptionEquals "ingress_class" "custom") }}' + help_text: Custom ingress class name + + - name: tls_enabled + title: Enable TLS + type: bool + default: "1" + when: 'repl{{ ConfigOptionEquals "ingress_enabled" "1" }}' + help_text: Enable TLS/HTTPS for ingress + + - name: tls_cert_manager_issuer + title: Cert-Manager Issuer + type: text + default: letsencrypt-prod + when: '{{repl and (ConfigOptionEquals "ingress_enabled" "1") (ConfigOptionEquals "tls_enabled" "1") }}' + help_text: Name of the cert-manager ClusterIssuer + + - name: tls_acme_email + title: ACME Email Address + type: text + default: "" + required: true + when: '{{repl and (ConfigOptionEquals "ingress_enabled" "1") (ConfigOptionEquals "tls_enabled" "1") }}' + help_text: Email address for Let's Encrypt certificate registration notifications + + - name: database + title: Database Configuration + description: PostgreSQL database settings for storing feature flags + items: + - name: postgres_type + 
title: PostgreSQL Type + type: select_one + default: embedded + items: + - name: embedded + title: Embedded PostgreSQL (CloudNativePG) + - name: external + title: External PostgreSQL Database + required: true + help_text: Use embedded PostgreSQL or connect to external database + + - name: postgres_instances + title: Number of PostgreSQL Instances + type: select_one + default: "1" + items: + - name: "1" + title: "1 (Development)" + - name: "3" + title: "3 (High Availability)" + when: 'repl{{ ConfigOptionEquals "postgres_type" "embedded" }}' + help_text: Number of PostgreSQL cluster instances (3 recommended for production) + + - name: postgres_storage_size + title: PostgreSQL Storage Size + type: text + default: 10Gi + when: 'repl{{ ConfigOptionEquals "postgres_type" "embedded" }}' + required: true + help_text: Storage size for PostgreSQL data (e.g., 10Gi, 50Gi, 100Gi) + + - name: postgres_storage_class + title: PostgreSQL Storage Class + type: text + default: "" + when: 'repl{{ ConfigOptionEquals "postgres_type" "embedded" }}' + help_text: Storage class for PostgreSQL PVCs (leave empty for default) + + - name: postgres_resources_cpu + title: PostgreSQL CPU Limit + type: text + default: 1000m + when: 'repl{{ ConfigOptionEquals "postgres_type" "embedded" }}' + help_text: CPU limit for PostgreSQL pods (e.g., 500m, 1000m, 2000m) + + - name: postgres_resources_memory + title: PostgreSQL Memory Limit + type: text + default: 1Gi + when: 'repl{{ ConfigOptionEquals "postgres_type" "embedded" }}' + help_text: Memory limit for PostgreSQL pods (e.g., 512Mi, 1Gi, 2Gi) + + - name: external_postgres_host + title: External PostgreSQL Host + type: text + default: postgresql.example.com + when: 'repl{{ ConfigOptionEquals "postgres_type" "external" }}' + required: true + help_text: Hostname or IP address of external PostgreSQL server + + - name: external_postgres_port + title: External PostgreSQL Port + type: text + default: "5432" + when: 'repl{{ ConfigOptionEquals "postgres_type" 
"external" }}' + required: true + help_text: Port number of external PostgreSQL server + + - name: external_postgres_database + title: Database Name + type: text + default: flipt + when: 'repl{{ ConfigOptionEquals "postgres_type" "external" }}' + required: true + help_text: Name of the database to use + + - name: external_postgres_username + title: Database Username + type: text + default: flipt + when: 'repl{{ ConfigOptionEquals "postgres_type" "external" }}' + required: true + help_text: Username for database authentication + + - name: external_postgres_password + title: Database Password + type: password + when: 'repl{{ ConfigOptionEquals "postgres_type" "external" }}' + required: true + help_text: Password for database authentication + + - name: external_postgres_sslmode + title: SSL Mode + type: select_one + default: require + items: + - name: disable + title: Disable + - name: require + title: Require + - name: verify-ca + title: Verify CA + - name: verify-full + title: Verify Full + when: 'repl{{ ConfigOptionEquals "postgres_type" "external" }}' + help_text: SSL mode for PostgreSQL connection + + - name: valkey + title: Valkey Configuration + description: Valkey cache settings for high-performance multi-instance deployments (Redis-compatible) + items: + - name: valkey_enabled + title: Enable Valkey Cache + type: bool + default: "1" + help_text: Enable Valkey for distributed caching (required for multiple Flipt replicas) + recommended: true + + - name: resources + title: Resource Configuration + description: Configure CPU and memory resources for Flipt + items: + - name: flipt_cpu_request + title: Flipt CPU Request + type: text + default: 100m + help_text: CPU request for Flipt pods (e.g., 100m, 250m, 500m) + + - name: flipt_cpu_limit + title: Flipt CPU Limit + type: text + default: 500m + help_text: CPU limit for Flipt pods (e.g., 500m, 1000m, 2000m) + + - name: flipt_memory_request + title: Flipt Memory Request + type: text + default: 128Mi + help_text: 
Memory request for Flipt pods (e.g., 128Mi, 256Mi, 512Mi) + + - name: flipt_memory_limit + title: Flipt Memory Limit + type: text + default: 512Mi + help_text: Memory limit for Flipt pods (e.g., 512Mi, 1Gi, 2Gi) + + - name: advanced + title: Advanced Configuration + description: Advanced settings for production deployments + items: + - name: enable_autoscaling + title: Enable Horizontal Pod Autoscaling + type: bool + default: "0" + help_text: Enable HPA to automatically scale Flipt pods based on CPU/memory usage + + - name: hpa_min_replicas + title: HPA Minimum Replicas + type: text + default: "2" + when: 'repl{{ ConfigOptionEquals "enable_autoscaling" "1" }}' + help_text: Minimum number of replicas for autoscaling + + - name: hpa_max_replicas + title: HPA Maximum Replicas + type: text + default: "10" + when: 'repl{{ ConfigOptionEquals "enable_autoscaling" "1" }}' + help_text: Maximum number of replicas for autoscaling + + - name: hpa_cpu_target + title: HPA CPU Target Percentage + type: text + default: "80" + when: 'repl{{ ConfigOptionEquals "enable_autoscaling" "1" }}' + help_text: Target CPU utilization percentage for autoscaling + + - name: enable_metrics + title: Enable Prometheus Metrics + type: bool + default: "0" + help_text: Enable ServiceMonitor for Prometheus metrics collection + + - name: log_level + title: Log Level + type: select_one + default: info + items: + - name: debug + title: Debug + - name: info + title: Info + - name: warn + title: Warning + - name: error + title: Error + help_text: Logging verbosity level + + - name: log_encoding + title: Log Encoding + type: select_one + default: json + items: + - name: console + title: Console + - name: json + title: JSON + help_text: Log output format diff --git a/applications/flipt/replicated/kots-helm-chart.yaml b/applications/flipt/replicated/kots-helm-chart.yaml new file mode 100644 index 00000000..c4349d40 --- /dev/null +++ b/applications/flipt/replicated/kots-helm-chart.yaml @@ -0,0 +1,274 @@ 
+apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: flipt +spec: + chart: + name: flipt + chartVersion: 1.0.33 + + exclude: "" + + # Weight determines install order (lower = first) + weight: 1 + + # helmUpgradeFlags specifies additional flags for helm upgrade + helmUpgradeFlags: + - --timeout + - 15m + - --wait + - --wait-for-jobs + - --history-max=15 + + # Values for customer environment (uses template functions) + values: + global: + imageRegistry: 'repl{{ HasLocalRegistry | ternary LocalRegistryHost "" }}' + + replicaCount: repl{{ ConfigOption "replica_count" | ParseInt }} + + image: + registry: 'repl{{ HasLocalRegistry | ternary LocalRegistryHost "ghcr.io" }}' + repository: 'repl{{ HasLocalRegistry | ternary LocalRegistryNamespace "flipt-io" }}/flipt' + tag: "v1.61.0" + pullPolicy: IfNotPresent + + resources: + requests: + cpu: 'repl{{ ConfigOption "flipt_cpu_request" }}' + memory: 'repl{{ ConfigOption "flipt_memory_request" }}' + limits: + cpu: 'repl{{ ConfigOption "flipt_cpu_limit" }}' + memory: 'repl{{ ConfigOption "flipt_memory_limit" }}' + + config: + log: + level: 'repl{{ ConfigOption "log_level" }}' + encoding: 'repl{{ ConfigOption "log_encoding" }}' + + server: + protocol: http + host: 0.0.0.0 + httpPort: 8080 + grpcPort: 9000 + + db: + maxIdleConn: 10 + maxOpenConn: 50 + connMaxLifetime: 1h + + cache: + enabled: repl{{ ConfigOptionEquals "valkey_enabled" "1" }} + backend: 'repl{{ ConfigOptionEquals "valkey_enabled" "1" | ternary "redis" "memory" }}' + ttl: 5m + redis: + mode: single + prefix: "flipt" + + cors: + enabled: true + allowedOrigins: + - "*" + + service: + type: ClusterIP + httpPort: 8080 + grpcPort: 9000 + + autoscaling: + enabled: repl{{ ConfigOptionEquals "enable_autoscaling" "1" }} + minReplicas: repl{{ ConfigOption "hpa_min_replicas" | ParseInt }} + maxReplicas: repl{{ ConfigOption "hpa_max_replicas" | ParseInt }} + targetCPUUtilizationPercentage: repl{{ ConfigOption "hpa_cpu_target" | ParseInt }} + + podDisruptionBudget: + 
enabled: true + minAvailable: 1 + + serviceMonitor: + enabled: repl{{ ConfigOptionEquals "enable_metrics" "1" }} + + # CloudNativePG operator (subchart) + cloudnative-pg: + enabled: true + + # PostgreSQL configuration + postgresql: + type: 'repl{{ ConfigOption "postgres_type" }}' + + database: flipt + username: flipt + + embedded: + enabled: repl{{ ConfigOptionEquals "postgres_type" "embedded" }} + cluster: + instances: repl{{ ConfigOption "postgres_instances" | ParseInt }} + imageName: 'repl{{ HasLocalRegistry | ternary (printf "%s/%s/postgresql:16" LocalRegistryHost LocalRegistryNamespace) "ghcr.io/cloudnative-pg/postgresql:16" }}' + storage: + size: 'repl{{ ConfigOption "postgres_storage_size" }}' + storageClass: 'repl{{ ConfigOption "postgres_storage_class" }}' + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 'repl{{ ConfigOption "postgres_resources_cpu" }}' + memory: 'repl{{ ConfigOption "postgres_resources_memory" }}' + backup: + enabled: false + monitoring: + enabled: false + + # Valkey configuration (Redis-compatible) + valkey: + enabled: repl{{ ConfigOptionEquals "valkey_enabled" "1" }} + + image: + repository: 'repl{{ HasLocalRegistry | ternary (printf "%s/%s/valkey" LocalRegistryHost LocalRegistryNamespace) "ghcr.io/valkey-io/valkey" }}' + tag: "8.0" + + auth: + enable: false + + # Note: Official Valkey chart v0.2.0 has a bug with PVC creation + # Using emptyDir for cache (data is ephemeral anyway) + storage: + requestedSize: null + className: null + + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 500m + memory: 512Mi + + # Database creation Job image + dbJob: + image: + registry: 'repl{{ HasLocalRegistry | ternary LocalRegistryHost "docker.io" }}' + repository: 'repl{{ HasLocalRegistry | ternary (printf "%s/k8s" LocalRegistryNamespace) "alpine/k8s" }}' + tag: "1.33.7" + + # Replicated SDK + replicated: + enabled: true + integration: + enabled: true + + # Optional values applied conditionally + optionalValues: + 
# Rewrite Replicated SDK image for local registry + - when: 'repl{{ HasLocalRegistry }}' + recursiveMerge: true + values: + replicated: + image: + registry: 'repl{{ LocalRegistryHost }}' + repository: 'repl{{ LocalRegistryNamespace }}/replicated-sdk-image' + + # Ingress configuration (without TLS) + - when: '{{repl and (ConfigOptionEquals "ingress_enabled" "1") (not (ConfigOptionEquals "tls_enabled" "1")) }}' + recursiveMerge: false + values: + ingress: + enabled: true + className: 'repl{{ if ConfigOptionEquals "ingress_class" "custom" }}repl{{ ConfigOption "ingress_class_custom" }}repl{{ else }}repl{{ ConfigOption "ingress_class" }}repl{{ end }}' + hosts: + - host: 'repl{{ ConfigOption "ingress_hostname" }}' + paths: + - path: / + pathType: Prefix + + # Ingress configuration (with TLS + cert-manager) + - when: '{{repl and (ConfigOptionEquals "ingress_enabled" "1") (ConfigOptionEquals "tls_enabled" "1") }}' + recursiveMerge: false + values: + ingress: + enabled: true + className: 'repl{{ if ConfigOptionEquals "ingress_class" "custom" }}repl{{ ConfigOption "ingress_class_custom" }}repl{{ else }}repl{{ ConfigOption "ingress_class" }}repl{{ end }}' + annotations: + cert-manager.io/cluster-issuer: 'repl{{ ConfigOption "tls_cert_manager_issuer" }}' + hosts: + - host: 'repl{{ ConfigOption "ingress_hostname" }}' + paths: + - path: / + pathType: Prefix + tls: + - secretName: flipt-tls + hosts: + - 'repl{{ ConfigOption "ingress_hostname" }}' + + # External PostgreSQL + - when: 'repl{{ ConfigOptionEquals "postgres_type" "external" }}' + recursiveMerge: false + values: + postgresql: + external: + enabled: true + host: 'repl{{ ConfigOption "external_postgres_host" }}' + port: repl{{ ConfigOption "external_postgres_port" | ParseInt }} + database: 'repl{{ ConfigOption "external_postgres_database" }}' + username: 'repl{{ ConfigOption "external_postgres_username" }}' + password: 'repl{{ ConfigOption "external_postgres_password" }}' + sslMode: 'repl{{ ConfigOption 
"external_postgres_sslmode" }}' + + # Backup labels for KOTS snapshots + - when: "repl{{ LicenseFieldValue `isSnapshotSupported` }}" + recursiveMerge: true + values: + podLabels: + kots.io/backup: velero + podAnnotations: + kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }} + + # Builder values for air gap support + # These must be static/hardcoded to ensure all images are included + builder: + replicaCount: 2 + + # Flipt application image + image: + registry: ghcr.io + repository: flipt-io/flipt + tag: "v1.61.0" + + # CloudNativePG operator (subchart) + cloudnative-pg: + enabled: true + image: + repository: ghcr.io/cloudnative-pg/cloudnative-pg + tag: "1.25.0" + + # PostgreSQL configuration + postgresql: + type: embedded + embedded: + enabled: true + cluster: + instances: 1 + # PostgreSQL database image (managed by CNPG operator) + imageName: ghcr.io/cloudnative-pg/postgresql:16 + + # kubectl image used in database creation Job + dbJob: + image: + registry: docker.io + repository: alpine/k8s + tag: "1.33.7" + + # Valkey cache (Redis-compatible) + valkey: + enabled: true + image: + repository: ghcr.io/valkey-io/valkey + tag: "8.0" + + # Replicated SDK + replicated: + enabled: true + image: + registry: proxy.replicated.com + repository: library/replicated-sdk-image + tag: "1.15.0" diff --git a/applications/flipt/replicated/kots-lint-config.yaml b/applications/flipt/replicated/kots-lint-config.yaml new file mode 100644 index 00000000..91d670ae --- /dev/null +++ b/applications/flipt/replicated/kots-lint-config.yaml @@ -0,0 +1,10 @@ +apiVersion: kots.io/v1beta1 +kind: LintConfig +metadata: + name: default-lint-config +spec: + rules: + - name: application-icon + level: "off" + - name: nonexistent-status-informer-object + level: "off" \ No newline at end of file diff --git a/applications/flipt/tests/smoke/test-flipt.py b/applications/flipt/tests/smoke/test-flipt.py new file mode 100644 index 00000000..f8fab819 --- /dev/null +++ 
b/applications/flipt/tests/smoke/test-flipt.py @@ -0,0 +1,126 @@ +""" +Quick test script for a running Flipt instance. + +1. Creates a boolean flag via the Flipt API +2. Enables it +3. Evaluates it via the evaluation API +4. Cleans up + +Usage: + pip install requests + export FLIPT_URL=http://flipt.example.com # defaults to http://localhost:8080 + python test-flipt.py +""" + +import requests +import os +import sys + +FLIPT_URL = os.getenv("FLIPT_URL", "http://localhost:8080") +NAMESPACE = "default" +FLAG_KEY = "test-flag" + + +def main(): + session = requests.Session() + base = FLIPT_URL.rstrip("/") + + # 1. Health check + print(f"Checking Flipt health at {base} ...") + try: + resp = session.get(f"{base}/health", timeout=10) + resp.raise_for_status() + print(f" Health: {resp.json()}\n") + except requests.exceptions.RequestException as e: + print(f" Cannot reach Flipt: {e}") + sys.exit(1) + + # 2. Create a boolean flag + print(f"Creating boolean flag '{FLAG_KEY}' ...") + resp = session.post( + f"{base}/api/v1/namespaces/{NAMESPACE}/flags", + json={ + "key": FLAG_KEY, + "name": "Test Flag", + "type": "BOOLEAN_FLAG_TYPE", + "description": "Temporary flag created by test script", + "enabled": True, + }, + ) + if resp.status_code == 200: + print(f" Created: {resp.json()['key']}\n") + elif resp.status_code == 409: + print(f" Flag already exists, continuing.\n") + else: + print(f" Unexpected response: {resp.status_code} {resp.text}") + sys.exit(1) + + # 3. Enable the flag (update it) + print(f"Enabling flag '{FLAG_KEY}' ...") + resp = session.put( + f"{base}/api/v1/namespaces/{NAMESPACE}/flags/{FLAG_KEY}", + json={ + "key": FLAG_KEY, + "name": "Test Flag", + "type": "BOOLEAN_FLAG_TYPE", + "enabled": True, + }, + ) + if resp.status_code == 200: + print(f" Enabled: {resp.json().get('enabled')}\n") + else: + print(f" Update response: {resp.status_code} {resp.text}\n") + + # 4. 
Create a boolean rollout (100% true) + print(f"Creating rollout rule (100% true) ...") + resp = session.post( + f"{base}/api/v1/namespaces/{NAMESPACE}/flags/{FLAG_KEY}/rollouts", + json={ + "rank": 1, + "type": "THRESHOLD_ROLLOUT_TYPE", + "threshold": { + "percentage": 100.0, + "value": True, + }, + }, + ) + if resp.status_code == 200: + print(f" Rollout created.\n") + elif resp.status_code == 409: + print(f" Rollout already exists, continuing.\n") + else: + print(f" Rollout response: {resp.status_code} {resp.text}\n") + + # 5. Evaluate the flag + print(f"Evaluating flag '{FLAG_KEY}' ...") + resp = session.post( + f"{base}/evaluate/v1/boolean", + json={ + "namespaceKey": NAMESPACE, + "flagKey": FLAG_KEY, + "entityId": "test-user-1", + "context": {"plan": "enterprise"}, + }, + ) + if resp.status_code == 200: + data = resp.json() + print(f" Enabled: {data.get('enabled')}") + print(f" Reason: {data.get('reason')}\n") + else: + print(f" Evaluation response: {resp.status_code} {resp.text}\n") + + # 6. Clean up - delete the flag + print(f"Cleaning up - deleting flag '{FLAG_KEY}' ...") + resp = session.delete( + f"{base}/api/v1/namespaces/{NAMESPACE}/flags/{FLAG_KEY}", + ) + if resp.status_code == 200: + print(" Deleted.\n") + else: + print(f" Delete response: {resp.status_code} {resp.text}\n") + + print("Done!") + + +if __name__ == "__main__": + main()